resolved conflicts for merge of 47c888a9 to master

Change-Id: I4ba2fdc6374a93a892bb7651b0d174e495f09bf6
diff --git a/CleanSpec.mk b/CleanSpec.mk
index e6d9ebf..b8a9711 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -47,6 +47,11 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/libmedia_native.so)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/system/lib/libmedia_native.so)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libmedia_native.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudioflinger_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudioflinger.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudiopolicy_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudiopolicy.so)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/camera/Android.mk b/camera/Android.mk
index 5cedab0..5774b6f 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -1,3 +1,17 @@
+# Copyright 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 CAMERA_CLIENT_LOCAL_PATH:= $(call my-dir)
 include $(call all-subdir-makefiles)
 include $(CLEAR_VARS)
@@ -8,7 +22,7 @@
 	Camera.cpp \
 	CameraMetadata.cpp \
 	CameraParameters.cpp \
-	CameraParameters2.cpp \
+	CaptureResult.cpp \
 	ICamera.cpp \
 	ICameraClient.cpp \
 	ICameraService.cpp \
@@ -22,6 +36,7 @@
 	camera2/CaptureRequest.cpp \
 	ProCamera.cpp \
 	CameraBase.cpp \
+	VendorTagDescriptor.cpp
 
 LOCAL_SHARED_LIBRARIES := \
 	libcutils \
@@ -35,6 +50,7 @@
 
 LOCAL_C_INCLUDES += \
 	system/media/camera/include \
+	system/media/private/camera/include
 
 LOCAL_MODULE:= libcamera_client
 
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 7765914..1567cd1 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -25,6 +25,9 @@
 
 namespace android {
 
+#define ALIGN_TO(val, alignment) \
+    (((uintptr_t)(val) + ((alignment) - 1)) & ~((alignment) - 1))
+
 typedef Parcel::WritableBlob WritableBlob;
 typedef Parcel::ReadableBlob ReadableBlob;
 
@@ -270,7 +273,8 @@
     if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
         return res;
     }
-    return updateImpl(tag, (const void*)string.string(), string.size());
+    // string.size() doesn't count the null termination character.
+    return updateImpl(tag, (const void*)string.string(), string.size() + 1);
 }
 
 status_t CameraMetadata::updateImpl(uint32_t tag, const void *data,
@@ -431,40 +435,70 @@
         *out = NULL;
     }
 
-    // arg0 = metadataSize (int32)
-    int32_t metadataSizeTmp = -1;
-    if ((err = data.readInt32(&metadataSizeTmp)) != OK) {
+    // See CameraMetadata::writeToParcel for parcel data layout diagram and explanation.
+    // arg0 = blobSize (int32)
+    int32_t blobSizeTmp = -1;
+    if ((err = data.readInt32(&blobSizeTmp)) != OK) {
         ALOGE("%s: Failed to read metadata size (error %d %s)",
               __FUNCTION__, err, strerror(-err));
         return err;
     }
-    const size_t metadataSize = static_cast<size_t>(metadataSizeTmp);
+    const size_t blobSize = static_cast<size_t>(blobSizeTmp);
+    const size_t alignment = get_camera_metadata_alignment();
 
-    if (metadataSize == 0) {
+    // Special case: zero blob size means zero sized (NULL) metadata.
+    if (blobSize == 0) {
         ALOGV("%s: Read 0-sized metadata", __FUNCTION__);
         return OK;
     }
 
-    // NOTE: this doesn't make sense to me. shouldnt the blob
+    if (blobSize <= alignment) {
+        ALOGE("%s: metadata blob is malformed, blobSize(%zu) should be larger than alignment(%zu)",
+                __FUNCTION__, blobSize, alignment);
+        return BAD_VALUE;
+    }
+
+    const size_t metadataSize = blobSize - alignment;
+
+    // NOTE: this doesn't make sense to me. shouldn't the blob
     // know how big it is? why do we have to specify the size
     // to Parcel::readBlob ?
-
     ReadableBlob blob;
     // arg1 = metadata (blob)
     do {
-        if ((err = data.readBlob(metadataSize, &blob)) != OK) {
-            ALOGE("%s: Failed to read metadata blob (sized %d). Possible "
+        if ((err = data.readBlob(blobSize, &blob)) != OK) {
+            ALOGE("%s: Failed to read metadata blob (sized %zu). Possible "
                   " serialization bug. Error %d %s",
-                  __FUNCTION__, metadataSize, err, strerror(-err));
+                  __FUNCTION__, blobSize, err, strerror(-err));
             break;
         }
-        const camera_metadata_t* tmp =
-                       reinterpret_cast<const camera_metadata_t*>(blob.data());
 
+        // arg2 = offset (blob)
+        // Must be after blob since we don't know offset until after writeBlob.
+        int32_t offsetTmp;
+        if ((err = data.readInt32(&offsetTmp)) != OK) {
+            ALOGE("%s: Failed to read metadata offsetTmp (error %d %s)",
+                  __FUNCTION__, err, strerror(-err));
+            break;
+        }
+        const size_t offset = static_cast<size_t>(offsetTmp);
+        if (offset >= alignment) {
+            ALOGE("%s: metadata offset(%zu) should be less than alignment(%zu)",
+                    __FUNCTION__, offset, alignment);
+            err = BAD_VALUE;
+            break;
+        }
+
+        const uintptr_t metadataStart = reinterpret_cast<uintptr_t>(blob.data()) + offset;
+        const camera_metadata_t* tmp =
+                       reinterpret_cast<const camera_metadata_t*>(metadataStart);
+        ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu",
+                __FUNCTION__, alignment, tmp, offset);
         metadata = allocate_copy_camera_metadata_checked(tmp, metadataSize);
         if (metadata == NULL) {
             // We consider that allocation only fails if the validation
             // also failed, therefore the readFromParcel was a failure.
+            ALOGE("%s: metadata allocation and copy failed", __FUNCTION__);
             err = BAD_VALUE;
         }
     } while(0);
@@ -485,38 +519,79 @@
                                        const camera_metadata_t* metadata) {
     status_t res = OK;
 
-    // arg0 = metadataSize (int32)
+    /**
+     * Below is the camera metadata parcel layout:
+     *
+     * |--------------------------------------------|
+     * |             arg0: blobSize                 |
+     * |              (length = 4)                  |
+     * |--------------------------------------------|<--Skip the rest if blobSize == 0.
+     * |                                            |
+     * |                                            |
+     * |              arg1: blob                    |
+     * | (length = variable, see arg1 layout below) |
+     * |                                            |
+     * |                                            |
+     * |--------------------------------------------|
+     * |              arg2: offset                  |
+     * |              (length = 4)                  |
+     * |--------------------------------------------|
+     */
 
+    // arg0 = blobSize (int32)
     if (metadata == NULL) {
+        // Write zero blobSize for null metadata.
         return data.writeInt32(0);
     }
 
+    /**
+     * Always make the blob size sufficiently large, as we need to put the
+     * alignment padding and metadata into the blob. We don't know the alignment
+     * offset until after writeBlob, so the metadata is then copied to the
+     */
     const size_t metadataSize = get_camera_metadata_compact_size(metadata);
-    res = data.writeInt32(static_cast<int32_t>(metadataSize));
+    const size_t alignment = get_camera_metadata_alignment();
+    const size_t blobSize = metadataSize + alignment;
+    res = data.writeInt32(static_cast<int32_t>(blobSize));
     if (res != OK) {
         return res;
     }
 
-    // arg1 = metadata (blob)
+    size_t offset = 0;
+    /**
+     * arg1 = metadata (blob).
+     *
+     * The blob size is the sum of front padding size, metadata size and back padding
+     * size, which is equal to metadataSize + alignment.
+     *
+     * The blob layout is:
+     * |------------------------------------|<----Start address of the blob (unaligned).
+     * |           front padding            |
+     * |          (size = offset)           |
+     * |------------------------------------|<----Aligned start address of metadata.
+     * |                                    |
+     * |                                    |
+     * |            metadata                |
+     * |       (size = metadataSize)        |
+     * |                                    |
+     * |                                    |
+     * |------------------------------------|
+     * |           back padding             |
+     * |     (size = alignment - offset)    |
+     * |------------------------------------|<----End address of blob.
+     *                                            (Blob start address + blob size).
+     */
     WritableBlob blob;
     do {
-        res = data.writeBlob(metadataSize, &blob);
+        res = data.writeBlob(blobSize, &blob);
         if (res != OK) {
             break;
         }
-        copy_camera_metadata(blob.data(), metadataSize, metadata);
-
-        IF_ALOGV() {
-            if (validate_camera_metadata_structure(
-                        (const camera_metadata_t*)blob.data(),
-                        &metadataSize) != OK) {
-                ALOGV("%s: Failed to validate metadata %p after writing blob",
-                       __FUNCTION__, blob.data());
-            } else {
-                ALOGV("%s: Metadata written to blob. Validation success",
-                        __FUNCTION__);
-            }
-        }
+        const uintptr_t metadataStart = ALIGN_TO(blob.data(), alignment);
+        offset = metadataStart - reinterpret_cast<uintptr_t>(blob.data());
+        ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu",
+                __FUNCTION__, alignment, reinterpret_cast<const void*>(metadataStart), offset);
+        copy_camera_metadata(reinterpret_cast<void*>(metadataStart), metadataSize, metadata);
 
         // Not too big of a problem since receiving side does hard validation
         // Don't check the size since the compact size could be larger
@@ -528,6 +603,9 @@
     } while(false);
     blob.release();
 
+    // arg2 = offset (int32)
+    res = data.writeInt32(static_cast<int32_t>(offset));
+
     return res;
 }
 
diff --git a/camera/CameraParameters2.cpp b/camera/CameraParameters2.cpp
deleted file mode 100644
index eac79e1..0000000
--- a/camera/CameraParameters2.cpp
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
-**
-** Copyright 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "CameraParams2"
-// #define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <string.h>
-#include <stdlib.h>
-#include <camera/CameraParameters2.h>
-
-namespace android {
-
-CameraParameters2::CameraParameters2()
-                : mMap()
-{
-}
-
-CameraParameters2::~CameraParameters2()
-{
-}
-
-String8 CameraParameters2::flatten() const
-{
-    String8 flattened("");
-    size_t size = mMap.size();
-
-    for (size_t i = 0; i < size; i++) {
-        String8 k, v;
-        k = mMap.keyAt(i);
-        v = mMap.valueAt(i);
-
-        flattened += k;
-        flattened += "=";
-        flattened += v;
-        if (i != size-1)
-            flattened += ";";
-    }
-
-    ALOGV("%s: Flattened params = %s", __FUNCTION__, flattened.string());
-
-    return flattened;
-}
-
-void CameraParameters2::unflatten(const String8 &params)
-{
-    const char *a = params.string();
-    const char *b;
-
-    mMap.clear();
-
-    for (;;) {
-        // Find the bounds of the key name.
-        b = strchr(a, '=');
-        if (b == 0)
-            break;
-
-        // Create the key string.
-        String8 k(a, (size_t)(b-a));
-
-        // Find the value.
-        a = b+1;
-        b = strchr(a, ';');
-        if (b == 0) {
-            // If there's no semicolon, this is the last item.
-            String8 v(a);
-            mMap.add(k, v);
-            break;
-        }
-
-        String8 v(a, (size_t)(b-a));
-        mMap.add(k, v);
-        a = b+1;
-    }
-}
-
-
-void CameraParameters2::set(const char *key, const char *value)
-{
-    // XXX i think i can do this with strspn()
-    if (strchr(key, '=') || strchr(key, ';')) {
-        //XXX ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
-        return;
-    }
-
-    if (strchr(value, '=') || strchr(value, ';')) {
-        //XXX ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
-        return;
-    }
-
-    // Replacing a value updates the key's order to be the new largest order
-    ssize_t res = mMap.replaceValueFor(String8(key), String8(value));
-    LOG_ALWAYS_FATAL_IF(res < 0, "replaceValueFor(%s,%s) failed", key, value);
-}
-
-void CameraParameters2::set(const char *key, int value)
-{
-    char str[16];
-    sprintf(str, "%d", value);
-    set(key, str);
-}
-
-void CameraParameters2::setFloat(const char *key, float value)
-{
-    char str[16];  // 14 should be enough. We overestimate to be safe.
-    snprintf(str, sizeof(str), "%g", value);
-    set(key, str);
-}
-
-const char *CameraParameters2::get(const char *key) const
-{
-    ssize_t idx = mMap.indexOfKey(String8(key));
-    if (idx < 0) {
-        return NULL;
-    } else {
-        return mMap.valueAt(idx).string();
-    }
-}
-
-int CameraParameters2::getInt(const char *key) const
-{
-    const char *v = get(key);
-    if (v == 0)
-        return -1;
-    return strtol(v, 0, 0);
-}
-
-float CameraParameters2::getFloat(const char *key) const
-{
-    const char *v = get(key);
-    if (v == 0) return -1;
-    return strtof(v, 0);
-}
-
-status_t CameraParameters2::compareSetOrder(const char *key1, const char *key2,
-        int *order) const {
-    if (key1 == NULL) {
-        ALOGE("%s: key1 must not be NULL", __FUNCTION__);
-        return BAD_VALUE;
-    } else if (key2 == NULL) {
-        ALOGE("%s: key2 must not be NULL", __FUNCTION__);
-        return BAD_VALUE;
-    } else if (order == NULL) {
-        ALOGE("%s: order must not be NULL", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    ssize_t index1 = mMap.indexOfKey(String8(key1));
-    ssize_t index2 = mMap.indexOfKey(String8(key2));
-    if (index1 < 0) {
-        ALOGW("%s: Key1 (%s) was not set", __FUNCTION__, key1);
-        return NAME_NOT_FOUND;
-    } else if (index2 < 0) {
-        ALOGW("%s: Key2 (%s) was not set", __FUNCTION__, key2);
-        return NAME_NOT_FOUND;
-    }
-
-    *order = (index1 == index2) ? 0  :
-             (index1 < index2)  ? -1 :
-             1;
-
-    return OK;
-}
-
-void CameraParameters2::remove(const char *key)
-{
-    mMap.removeItem(String8(key));
-}
-
-// Parse string like "640x480" or "10000,20000"
-static int parse_pair(const char *str, int *first, int *second, char delim,
-                      char **endptr = NULL)
-{
-    // Find the first integer.
-    char *end;
-    int w = (int)strtol(str, &end, 10);
-    // If a delimeter does not immediately follow, give up.
-    if (*end != delim) {
-        ALOGE("Cannot find delimeter (%c) in str=%s", delim, str);
-        return -1;
-    }
-
-    // Find the second integer, immediately after the delimeter.
-    int h = (int)strtol(end+1, &end, 10);
-
-    *first = w;
-    *second = h;
-
-    if (endptr) {
-        *endptr = end;
-    }
-
-    return 0;
-}
-
-static void parseSizesList(const char *sizesStr, Vector<Size> &sizes)
-{
-    if (sizesStr == 0) {
-        return;
-    }
-
-    char *sizeStartPtr = (char *)sizesStr;
-
-    while (true) {
-        int width, height;
-        int success = parse_pair(sizeStartPtr, &width, &height, 'x',
-                                 &sizeStartPtr);
-        if (success == -1 || (*sizeStartPtr != ',' && *sizeStartPtr != '\0')) {
-            ALOGE("Picture sizes string \"%s\" contains invalid character.", sizesStr);
-            return;
-        }
-        sizes.push(Size(width, height));
-
-        if (*sizeStartPtr == '\0') {
-            return;
-        }
-        sizeStartPtr++;
-    }
-}
-
-void CameraParameters2::setPreviewSize(int width, int height)
-{
-    char str[32];
-    sprintf(str, "%dx%d", width, height);
-    set(CameraParameters::KEY_PREVIEW_SIZE, str);
-}
-
-void CameraParameters2::getPreviewSize(int *width, int *height) const
-{
-    *width = *height = -1;
-    // Get the current string, if it doesn't exist, leave the -1x-1
-    const char *p = get(CameraParameters::KEY_PREVIEW_SIZE);
-    if (p == 0)  return;
-    parse_pair(p, width, height, 'x');
-}
-
-void CameraParameters2::getPreferredPreviewSizeForVideo(int *width, int *height) const
-{
-    *width = *height = -1;
-    const char *p = get(CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO);
-    if (p == 0)  return;
-    parse_pair(p, width, height, 'x');
-}
-
-void CameraParameters2::getSupportedPreviewSizes(Vector<Size> &sizes) const
-{
-    const char *previewSizesStr = get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
-    parseSizesList(previewSizesStr, sizes);
-}
-
-void CameraParameters2::setVideoSize(int width, int height)
-{
-    char str[32];
-    sprintf(str, "%dx%d", width, height);
-    set(CameraParameters::KEY_VIDEO_SIZE, str);
-}
-
-void CameraParameters2::getVideoSize(int *width, int *height) const
-{
-    *width = *height = -1;
-    const char *p = get(CameraParameters::KEY_VIDEO_SIZE);
-    if (p == 0) return;
-    parse_pair(p, width, height, 'x');
-}
-
-void CameraParameters2::getSupportedVideoSizes(Vector<Size> &sizes) const
-{
-    const char *videoSizesStr = get(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES);
-    parseSizesList(videoSizesStr, sizes);
-}
-
-void CameraParameters2::setPreviewFrameRate(int fps)
-{
-    set(CameraParameters::KEY_PREVIEW_FRAME_RATE, fps);
-}
-
-int CameraParameters2::getPreviewFrameRate() const
-{
-    return getInt(CameraParameters::KEY_PREVIEW_FRAME_RATE);
-}
-
-void CameraParameters2::getPreviewFpsRange(int *min_fps, int *max_fps) const
-{
-    *min_fps = *max_fps = -1;
-    const char *p = get(CameraParameters::KEY_PREVIEW_FPS_RANGE);
-    if (p == 0) return;
-    parse_pair(p, min_fps, max_fps, ',');
-}
-
-void CameraParameters2::setPreviewFpsRange(int min_fps, int max_fps)
-{
-    String8 str = String8::format("%d,%d", min_fps, max_fps);
-    set(CameraParameters::KEY_PREVIEW_FPS_RANGE, str.string());
-}
-
-void CameraParameters2::setPreviewFormat(const char *format)
-{
-    set(CameraParameters::KEY_PREVIEW_FORMAT, format);
-}
-
-const char *CameraParameters2::getPreviewFormat() const
-{
-    return get(CameraParameters::KEY_PREVIEW_FORMAT);
-}
-
-void CameraParameters2::setPictureSize(int width, int height)
-{
-    char str[32];
-    sprintf(str, "%dx%d", width, height);
-    set(CameraParameters::KEY_PICTURE_SIZE, str);
-}
-
-void CameraParameters2::getPictureSize(int *width, int *height) const
-{
-    *width = *height = -1;
-    // Get the current string, if it doesn't exist, leave the -1x-1
-    const char *p = get(CameraParameters::KEY_PICTURE_SIZE);
-    if (p == 0) return;
-    parse_pair(p, width, height, 'x');
-}
-
-void CameraParameters2::getSupportedPictureSizes(Vector<Size> &sizes) const
-{
-    const char *pictureSizesStr = get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES);
-    parseSizesList(pictureSizesStr, sizes);
-}
-
-void CameraParameters2::setPictureFormat(const char *format)
-{
-    set(CameraParameters::KEY_PICTURE_FORMAT, format);
-}
-
-const char *CameraParameters2::getPictureFormat() const
-{
-    return get(CameraParameters::KEY_PICTURE_FORMAT);
-}
-
-void CameraParameters2::dump() const
-{
-    ALOGD("dump: mMap.size = %d", mMap.size());
-    for (size_t i = 0; i < mMap.size(); i++) {
-        String8 k, v;
-        k = mMap.keyAt(i);
-        v = mMap.valueAt(i);
-        ALOGD("%s: %s\n", k.string(), v.string());
-    }
-}
-
-status_t CameraParameters2::dump(int fd, const Vector<String16>& args) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-    snprintf(buffer, 255, "CameraParameters2::dump: mMap.size = %zu\n", mMap.size());
-    result.append(buffer);
-    for (size_t i = 0; i < mMap.size(); i++) {
-        String8 k, v;
-        k = mMap.keyAt(i);
-        v = mMap.valueAt(i);
-        snprintf(buffer, 255, "\t%s: %s\n", k.string(), v.string());
-        result.append(buffer);
-    }
-    write(fd, result.string(), result.size());
-    return NO_ERROR;
-}
-
-}; // namespace android
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
new file mode 100644
index 0000000..c016e52
--- /dev/null
+++ b/camera/CaptureResult.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera-CaptureResult"
+#include <utils/Log.h>
+
+#include <camera/CaptureResult.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+bool CaptureResultExtras::isValid() {
+    return requestId >= 0;
+}
+
+status_t CaptureResultExtras::readFromParcel(Parcel *parcel) {
+    if (parcel == NULL) {
+        ALOGE("%s: Null parcel", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    parcel->readInt32(&requestId);
+    parcel->readInt32(&burstId);
+    parcel->readInt32(&afTriggerId);
+    parcel->readInt32(&precaptureTriggerId);
+    parcel->readInt64(&frameNumber);
+
+    return OK;
+}
+
+status_t CaptureResultExtras::writeToParcel(Parcel *parcel) const {
+    if (parcel == NULL) {
+        ALOGE("%s: Null parcel", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    parcel->writeInt32(requestId);
+    parcel->writeInt32(burstId);
+    parcel->writeInt32(afTriggerId);
+    parcel->writeInt32(precaptureTriggerId);
+    parcel->writeInt64(frameNumber);
+
+    return OK;
+}
+
+CaptureResult::CaptureResult() :
+        mMetadata(), mResultExtras() {
+}
+
+CaptureResult::CaptureResult(const CaptureResult &otherResult) {
+    mResultExtras = otherResult.mResultExtras;
+    mMetadata = otherResult.mMetadata;
+}
+
+status_t CaptureResult::readFromParcel(Parcel *parcel) {
+
+    ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
+
+    if (parcel == NULL) {
+        ALOGE("%s: parcel is null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    mMetadata.clear();
+
+    status_t res = OK;
+    res = mMetadata.readFromParcel(parcel);
+    if (res != OK) {
+        ALOGE("%s: Failed to read metadata from parcel.",
+              __FUNCTION__);
+        return res;
+    }
+    ALOGV("%s: Read metadata from parcel", __FUNCTION__);
+
+    res = mResultExtras.readFromParcel(parcel);
+    if (res != OK) {
+        ALOGE("%s: Failed to read result extras from parcel.",
+                __FUNCTION__);
+        return res;
+    }
+    ALOGV("%s: Read result extras from parcel", __FUNCTION__);
+
+    return OK;
+}
+
+status_t CaptureResult::writeToParcel(Parcel *parcel) const {
+
+    ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
+
+    if (parcel == NULL) {
+        ALOGE("%s: parcel is null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    status_t res;
+
+    res = mMetadata.writeToParcel(parcel);
+    if (res != OK) {
+        ALOGE("%s: Failed to write metadata to parcel", __FUNCTION__);
+        return res;
+    }
+    ALOGV("%s: Wrote metadata to parcel", __FUNCTION__);
+
+    res = mResultExtras.writeToParcel(parcel);
+    if (res != OK) {
+        ALOGE("%s: Failed to write result extras to parcel", __FUNCTION__);
+        return res;
+    }
+    ALOGV("%s: Wrote result extras to parcel", __FUNCTION__);
+
+    return OK;
+}
+
+} // namespace android
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index 5fc89fb..b86651f 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -17,6 +17,7 @@
 
 #define LOG_TAG "BpCameraService"
 #include <utils/Log.h>
+#include <utils/Errors.h>
 
 #include <stdint.h>
 #include <sys/types.h>
@@ -34,6 +35,7 @@
 #include <camera/camera2/ICameraDeviceUser.h>
 #include <camera/camera2/ICameraDeviceCallbacks.h>
 #include <camera/CameraMetadata.h>
+#include <camera/VendorTagDescriptor.h>
 
 namespace android {
 
@@ -143,6 +145,24 @@
         return result;
     }
 
+    // Get enumeration and description of vendor tags for camera
+    virtual status_t getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        remote()->transact(BnCameraService::GET_CAMERA_VENDOR_TAG_DESCRIPTOR, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t result = reply.readInt32();
+
+        if (reply.readInt32() != 0) {
+            sp<VendorTagDescriptor> d;
+            if (VendorTagDescriptor::createFromParcel(&reply, /*out*/d) == OK) {
+                desc = d;
+            }
+        }
+        return result;
+    }
+
     // connect to camera service (android.hardware.Camera)
     virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
                              const String16 &clientPackageName, int clientUid,
@@ -275,6 +295,22 @@
             info.writeToParcel(reply);
             return NO_ERROR;
         } break;
+        case GET_CAMERA_VENDOR_TAG_DESCRIPTOR: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            sp<VendorTagDescriptor> d;
+            status_t result = getCameraVendorTagDescriptor(d);
+            reply->writeNoException();
+            reply->writeInt32(result);
+
+            // out-variables are after exception and return value
+            if (d == NULL) {
+                reply->writeInt32(0);
+            } else {
+                reply->writeInt32(1); // means the parcelable is included
+                d->writeToParcel(reply);
+            }
+            return NO_ERROR;
+        } break;
         case CONNECT: {
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraClient> cameraClient =
@@ -284,7 +320,7 @@
             int32_t clientUid = data.readInt32();
             sp<ICamera> camera;
             status_t status = connect(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/ camera);
+                    clientName, clientUid, /*out*/camera);
             reply->writeNoException();
             reply->writeInt32(status);
             if (camera != NULL) {
@@ -304,7 +340,7 @@
             int32_t clientUid = data.readInt32();
             sp<IProCameraUser> camera;
             status_t status = connectPro(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/ camera);
+                    clientName, clientUid, /*out*/camera);
             reply->writeNoException();
             reply->writeInt32(status);
             if (camera != NULL) {
@@ -324,7 +360,7 @@
             int32_t clientUid = data.readInt32();
             sp<ICameraDeviceUser> camera;
             status_t status = connectDevice(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/ camera);
+                    clientName, clientUid, /*out*/camera);
             reply->writeNoException();
             reply->writeInt32(status);
             if (camera != NULL) {
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
index ba5a48c..48f8e8e 100644
--- a/camera/ProCamera.cpp
+++ b/camera/ProCamera.cpp
@@ -249,11 +249,14 @@
     sp <IProCameraUser> c = mCamera;
     if (c == 0) return NO_INIT;
 
-    sp<BufferQueue> bq = new BufferQueue();
-    sp<CpuConsumer> cc = new CpuConsumer(bq, heapCount/*, synchronousMode*/);
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    sp<CpuConsumer> cc = new CpuConsumer(consumer, heapCount
+            /*, synchronousMode*/);
     cc->setName(String8("ProCamera::mCpuConsumer"));
 
-    sp<Surface> stc = new Surface(bq);
+    sp<Surface> stc = new Surface(producer);
 
     status_t s = createStream(width, height, format,
                               stc->getIGraphicBufferProducer(),
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
new file mode 100644
index 0000000..a0a6a51
--- /dev/null
+++ b/camera/VendorTagDescriptor.cpp
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VendorTagDescriptor"
+
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+#include <utils/Vector.h>
+#include <system/camera_metadata.h>
+#include <camera_metadata_hidden.h>
+
+#include "camera/VendorTagDescriptor.h"
+
+#include <string.h>
+
+namespace android {
+
+extern "C" {
+
+static int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* v);
+static void vendor_tag_descriptor_get_all_tags(const vendor_tag_ops_t* v, uint32_t* tagArray);
+static const char* vendor_tag_descriptor_get_section_name(const vendor_tag_ops_t* v, uint32_t tag);
+static const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag);
+static int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag);
+
+} /* extern "C" */
+
+
+static Mutex sLock;
+static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
+
+VendorTagDescriptor::VendorTagDescriptor() {}
+VendorTagDescriptor::~VendorTagDescriptor() {}
+
+status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor) {
+    if (vOps == NULL) {
+        ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    int tagCount = vOps->get_tag_count(vOps);
+    if (tagCount < 0 || tagCount > INT32_MAX) {
+        ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
+        return BAD_VALUE;
+    }
+
+    Vector<uint32_t> tagArray;
+    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+    vOps->get_all_tags(vOps, /*out*/tagArray.editArray());
+
+    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+        uint32_t tag = tagArray[i];
+        if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+            ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+            return BAD_VALUE;
+        }
+        const char *tagName = vOps->get_tag_name(vOps, tag);
+        if (tagName == NULL) {
+            ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+            return BAD_VALUE;
+        }
+        desc->mTagToNameMap.add(tag, String8(tagName));
+        const char *sectionName = vOps->get_section_name(vOps, tag);
+        if (sectionName == NULL) {
+            ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
+            return BAD_VALUE;
+        }
+        desc->mTagToSectionMap.add(tag, String8(sectionName));
+        int tagType = vOps->get_tag_type(vOps, tag);
+        if (tagType < 0 || tagType >= NUM_TYPES) {
+            ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+            return BAD_VALUE;
+        }
+        desc->mTagToTypeMap.add(tag, tagType);
+    }
+    descriptor = desc;
+    return OK;
+}
+
+status_t VendorTagDescriptor::createFromParcel(const Parcel* parcel,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor) {
+    status_t res = OK;
+    if (parcel == NULL) {
+        ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    int32_t tagCount = 0;
+    if ((res = parcel->readInt32(&tagCount)) != OK) {
+        ALOGE("%s: could not read tag count from parcel", __FUNCTION__);
+        return res;
+    }
+
+    if (tagCount < 0 || tagCount > INT32_MAX) {
+        ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
+        return BAD_VALUE;
+    }
+
+    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    uint32_t tag;
+    int32_t tagType;
+    for (int32_t i = 0; i < tagCount; ++i) {
+        if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&tag))) != OK) {
+            ALOGE("%s: could not read tag id from parcel for index %d", __FUNCTION__, i);
+            break;
+        }
+        if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+            ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+            res = BAD_VALUE;
+            break;
+        }
+        if ((res = parcel->readInt32(&tagType)) != OK) {
+            ALOGE("%s: could not read tag type from parcel for tag %d", __FUNCTION__, tag);
+            break;
+        }
+        if (tagType < 0 || tagType >= NUM_TYPES) {
+            ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+            res = BAD_VALUE;
+            break;
+        }
+        String8 tagName = parcel->readString8();
+        if (tagName.isEmpty()) {
+            ALOGE("%s: parcel tag name was NULL for tag %d.", __FUNCTION__, tag);
+            res = NOT_ENOUGH_DATA;
+            break;
+        }
+        String8 sectionName = parcel->readString8();
+        if (sectionName.isEmpty()) {
+            ALOGE("%s: parcel section name was NULL for tag %d.", __FUNCTION__, tag);
+            res = NOT_ENOUGH_DATA;
+            break;
+        }
+
+        desc->mTagToNameMap.add(tag, tagName);
+        desc->mTagToSectionMap.add(tag, sectionName);
+        desc->mTagToTypeMap.add(tag, tagType);
+    }
+
+    if (res != OK) {
+        return res;
+    }
+
+    descriptor = desc;
+    return res;
+}
+
+int VendorTagDescriptor::getTagCount() const {
+    size_t size = mTagToNameMap.size();
+    if (size == 0) {
+        return VENDOR_TAG_COUNT_ERR;
+    }
+    return size;
+}
+
+void VendorTagDescriptor::getTagArray(uint32_t* tagArray) const {
+    size_t size = mTagToNameMap.size();
+    for (size_t i = 0; i < size; ++i) {
+        tagArray[i] = mTagToNameMap.keyAt(i);
+    }
+}
+
+const char* VendorTagDescriptor::getSectionName(uint32_t tag) const {
+    ssize_t index = mTagToSectionMap.indexOfKey(tag);
+    if (index < 0) {
+        return VENDOR_SECTION_NAME_ERR;
+    }
+    return mTagToSectionMap.valueAt(index).string();
+}
+
+const char* VendorTagDescriptor::getTagName(uint32_t tag) const {
+    ssize_t index = mTagToNameMap.indexOfKey(tag);
+    if (index < 0) {
+        return VENDOR_TAG_NAME_ERR;
+    }
+    return mTagToNameMap.valueAt(index).string();
+}
+
+int VendorTagDescriptor::getTagType(uint32_t tag) const {
+    ssize_t index = mTagToNameMap.indexOfKey(tag);
+    if (index < 0) {
+        return VENDOR_TAG_TYPE_ERR;
+    }
+    return mTagToTypeMap.valueFor(tag);
+}
+
+status_t VendorTagDescriptor::writeToParcel(Parcel* parcel) const {
+    status_t res = OK;
+    if (parcel == NULL) {
+        ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    if ((res = parcel->writeInt32(mTagCount)) != OK) {
+        return res;
+    }
+
+    size_t size = mTagToNameMap.size();
+    uint32_t tag;
+    int32_t tagType;
+    for (size_t i = 0; i < size; ++i) {
+        tag = mTagToNameMap.keyAt(i);
+        String8 tagName = mTagToNameMap[i];
+        String8 sectionName = mTagToSectionMap.valueFor(tag);
+        tagType = mTagToTypeMap.valueFor(tag);
+        if ((res = parcel->writeInt32(tag)) != OK) break;
+        if ((res = parcel->writeInt32(tagType)) != OK) break;
+        if ((res = parcel->writeString8(tagName)) != OK) break;
+        if ((res = parcel->writeString8(sectionName)) != OK) break;
+    }
+
+    return res;
+}
+
+status_t VendorTagDescriptor::setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc) {
+    status_t res = OK;
+    Mutex::Autolock al(sLock);
+    sGlobalVendorTagDescriptor = desc;
+
+    vendor_tag_ops_t* opsPtr = NULL;
+    if (desc != NULL) {
+        opsPtr = &(desc->mVendorOps);
+        opsPtr->get_tag_count = vendor_tag_descriptor_get_tag_count;
+        opsPtr->get_all_tags = vendor_tag_descriptor_get_all_tags;
+        opsPtr->get_section_name = vendor_tag_descriptor_get_section_name;
+        opsPtr->get_tag_name = vendor_tag_descriptor_get_tag_name;
+        opsPtr->get_tag_type = vendor_tag_descriptor_get_tag_type;
+    }
+    if((res = set_camera_metadata_vendor_ops(opsPtr)) != OK) {
+        ALOGE("%s: Could not set vendor tag descriptor, received error %s (%d)."
+                , __FUNCTION__, strerror(-res), res);
+    }
+    return res;
+}
+
+void VendorTagDescriptor::clearGlobalVendorTagDescriptor() {
+    Mutex::Autolock al(sLock);
+    set_camera_metadata_vendor_ops(NULL);
+    sGlobalVendorTagDescriptor.clear();
+}
+
+sp<VendorTagDescriptor> VendorTagDescriptor::getGlobalVendorTagDescriptor() {
+    Mutex::Autolock al(sLock);
+    return sGlobalVendorTagDescriptor;
+}
+
+extern "C" {
+
+int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* v) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptor == NULL) {
+        ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+        return VENDOR_TAG_COUNT_ERR;
+    }
+    return sGlobalVendorTagDescriptor->getTagCount();
+}
+
+void vendor_tag_descriptor_get_all_tags(const vendor_tag_ops_t* v, uint32_t* tagArray) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptor == NULL) {
+        ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+        return;
+    }
+    sGlobalVendorTagDescriptor->getTagArray(tagArray);
+}
+
+const char* vendor_tag_descriptor_get_section_name(const vendor_tag_ops_t* v, uint32_t tag) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptor == NULL) {
+        ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+        return VENDOR_SECTION_NAME_ERR;
+    }
+    return sGlobalVendorTagDescriptor->getSectionName(tag);
+}
+
+const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptor == NULL) {
+        ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+        return VENDOR_TAG_NAME_ERR;
+    }
+    return sGlobalVendorTagDescriptor->getTagName(tag);
+}
+
+int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag) {
+    Mutex::Autolock al(sLock);
+    if (sGlobalVendorTagDescriptor == NULL) {
+        ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+        return VENDOR_TAG_TYPE_ERR;
+    }
+    return sGlobalVendorTagDescriptor->getTagType(tag);
+}
+
+} /* extern "C" */
+} /* namespace android */
diff --git a/camera/camera2/ICameraDeviceCallbacks.cpp b/camera/camera2/ICameraDeviceCallbacks.cpp
index 613358a..4cc7b5d 100644
--- a/camera/camera2/ICameraDeviceCallbacks.cpp
+++ b/camera/camera2/ICameraDeviceCallbacks.cpp
@@ -28,6 +28,7 @@
 
 #include <camera/camera2/ICameraDeviceCallbacks.h>
 #include "camera/CameraMetadata.h"
+#include "camera/CaptureResult.h"
 
 namespace android {
 
@@ -46,12 +47,14 @@
     {
     }
 
-    void onDeviceError(CameraErrorCode errorCode)
+    void onDeviceError(CameraErrorCode errorCode, const CaptureResultExtras& resultExtras)
     {
         ALOGV("onDeviceError");
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
         data.writeInt32(static_cast<int32_t>(errorCode));
+        data.writeInt32(1); // to mark presence of CaptureResultExtras object
+        resultExtras.writeToParcel(&data);
         remote()->transact(CAMERA_ERROR, data, &reply, IBinder::FLAG_ONEWAY);
         data.writeNoException();
     }
@@ -65,25 +68,28 @@
         data.writeNoException();
     }
 
-    void onCaptureStarted(int32_t requestId, int64_t timestamp)
+    void onCaptureStarted(const CaptureResultExtras& result, int64_t timestamp)
     {
         ALOGV("onCaptureStarted");
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        data.writeInt32(requestId);
+        data.writeInt32(1); // to mark presence of CaptureResultExtras object
+        result.writeToParcel(&data);
         data.writeInt64(timestamp);
         remote()->transact(CAPTURE_STARTED, data, &reply, IBinder::FLAG_ONEWAY);
         data.writeNoException();
     }
 
 
-    void onResultReceived(int32_t requestId, const CameraMetadata& result) {
+    void onResultReceived(const CameraMetadata& metadata,
+            const CaptureResultExtras& resultExtras) {
         ALOGV("onResultReceived");
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        data.writeInt32(requestId);
         data.writeInt32(1); // to mark presence of metadata object
-        result.writeToParcel(&data);
+        metadata.writeToParcel(&data);
+        data.writeInt32(1); // to mark presence of CaptureResult object
+        resultExtras.writeToParcel(&data);
         remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
         data.writeNoException();
     }
@@ -104,7 +110,13 @@
             CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
             CameraErrorCode errorCode =
                     static_cast<CameraErrorCode>(data.readInt32());
-            onDeviceError(errorCode);
+            CaptureResultExtras resultExtras;
+            if (data.readInt32() != 0) {
+                resultExtras.readFromParcel(const_cast<Parcel*>(&data));
+            } else {
+                ALOGE("No CaptureResultExtras object is present!");
+            }
+            onDeviceError(errorCode, resultExtras);
             data.readExceptionCode();
             return NO_ERROR;
         } break;
@@ -118,23 +130,33 @@
         case CAPTURE_STARTED: {
             ALOGV("onCaptureStarted");
             CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            int32_t requestId = data.readInt32();
+            CaptureResultExtras result;
+            if (data.readInt32() != 0) {
+                result.readFromParcel(const_cast<Parcel*>(&data));
+            } else {
+                ALOGE("No CaptureResultExtras object is present in result!");
+            }
             int64_t timestamp = data.readInt64();
-            onCaptureStarted(requestId, timestamp);
+            onCaptureStarted(result, timestamp);
             data.readExceptionCode();
             return NO_ERROR;
         } break;
         case RESULT_RECEIVED: {
             ALOGV("onResultReceived");
             CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            int32_t requestId = data.readInt32();
-            CameraMetadata result;
+            CameraMetadata metadata;
             if (data.readInt32() != 0) {
-                result.readFromParcel(const_cast<Parcel*>(&data));
+                metadata.readFromParcel(const_cast<Parcel*>(&data));
             } else {
                 ALOGW("No metadata object is present in result");
             }
-            onResultReceived(requestId, result);
+            CaptureResultExtras resultExtras;
+            if (data.readInt32() != 0) {
+                resultExtras.readFromParcel(const_cast<Parcel*>(&data));
+            } else {
+                ALOGW("No capture result extras object is present in result");
+            }
+            onResultReceived(metadata, resultExtras);
             data.readExceptionCode();
             return NO_ERROR;
         } break;
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index 1e5822f..ad65955 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -35,6 +35,7 @@
 enum {
     DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
     SUBMIT_REQUEST,
+    SUBMIT_REQUEST_LIST,
     CANCEL_REQUEST,
     DELETE_STREAM,
     CREATE_STREAM,
@@ -75,7 +76,8 @@
         reply.readExceptionCode();
     }
 
-    virtual int submitRequest(sp<CaptureRequest> request, bool streaming)
+    virtual status_t submitRequest(sp<CaptureRequest> request, bool repeating,
+                              int64_t *lastFrameNumber)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
@@ -89,15 +91,67 @@
         }
 
         // arg1 = streaming (bool)
-        data.writeInt32(streaming);
+        data.writeInt32(repeating);
 
         remote()->transact(SUBMIT_REQUEST, data, &reply);
 
         reply.readExceptionCode();
-        return reply.readInt32();
+        status_t res = reply.readInt32();
+
+        status_t resFrameNumber = BAD_VALUE;
+        if (reply.readInt32() != 0) {
+            if (lastFrameNumber != NULL) {
+                resFrameNumber = reply.readInt64(lastFrameNumber);
+            }
+        }
+
+        if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
+            res = FAILED_TRANSACTION;
+        }
+        return res;
     }
 
-    virtual status_t cancelRequest(int requestId)
+    virtual status_t submitRequestList(List<sp<CaptureRequest> > requestList, bool repeating,
+                                  int64_t *lastFrameNumber)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+
+        data.writeInt32(requestList.size());
+
+        for (List<sp<CaptureRequest> >::iterator it = requestList.begin();
+                it != requestList.end(); ++it) {
+            sp<CaptureRequest> request = *it;
+            if (request != 0) {
+                data.writeInt32(1);
+                if (request->writeToParcel(&data) != OK) {
+                    return BAD_VALUE;
+                }
+            } else {
+                data.writeInt32(0);
+            }
+        }
+
+        data.writeInt32(repeating);
+
+        remote()->transact(SUBMIT_REQUEST_LIST, data, &reply);
+
+        reply.readExceptionCode();
+        status_t res = reply.readInt32();
+
+        status_t resFrameNumber = BAD_VALUE;
+        if (reply.readInt32() != 0) {
+            if (lastFrameNumber != NULL) {
+                resFrameNumber = reply.readInt64(lastFrameNumber);
+            }
+        }
+        if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
+            res = FAILED_TRANSACTION;
+        }
+        return res;
+    }
+
+    virtual status_t cancelRequest(int requestId, int64_t *lastFrameNumber)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
@@ -106,7 +160,18 @@
         remote()->transact(CANCEL_REQUEST, data, &reply);
 
         reply.readExceptionCode();
-        return reply.readInt32();
+        status_t res = reply.readInt32();
+
+        status_t resFrameNumber = BAD_VALUE;
+        if (reply.readInt32() != 0) {
+            if (lastFrameNumber != NULL) {
+                resFrameNumber = reply.readInt64(lastFrameNumber);
+            }
+        }
+        if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
+            res = FAILED_TRANSACTION;
+        }
+        return res;
     }
 
     virtual status_t deleteStream(int streamId)
@@ -197,14 +262,25 @@
         return reply.readInt32();
     }
 
-    virtual status_t flush()
+    virtual status_t flush(int64_t *lastFrameNumber)
     {
         ALOGV("flush");
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
         remote()->transact(FLUSH, data, &reply);
         reply.readExceptionCode();
-        return reply.readInt32();
+        status_t res = reply.readInt32();
+
+        status_t resFrameNumber = BAD_VALUE;
+        if (reply.readInt32() != 0) {
+            if (lastFrameNumber != NULL) {
+                resFrameNumber = reply.readInt64(lastFrameNumber);
+            }
+        }
+        if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
+            res = FAILED_TRANSACTION;
+        }
+        return res;
     }
 
 private:
@@ -239,11 +315,43 @@
             }
 
             // arg1 = streaming (bool)
-            bool streaming = data.readInt32();
+            bool repeating = data.readInt32();
 
             // return code: requestId (int32)
             reply->writeNoException();
-            reply->writeInt32(submitRequest(request, streaming));
+            int64_t lastFrameNumber = -1;
+            reply->writeInt32(submitRequest(request, repeating, &lastFrameNumber));
+            reply->writeInt32(1);
+            reply->writeInt64(lastFrameNumber);
+
+            return NO_ERROR;
+        } break;
+        case SUBMIT_REQUEST_LIST: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+
+            List<sp<CaptureRequest> > requestList;
+            int requestListSize = data.readInt32();
+            for (int i = 0; i < requestListSize; i++) {
+                if (data.readInt32() != 0) {
+                    sp<CaptureRequest> request = new CaptureRequest();
+                    if (request->readFromParcel(const_cast<Parcel*>(&data)) != OK) {
+                        return BAD_VALUE;
+                    }
+                    requestList.push_back(request);
+                } else {
+                    sp<CaptureRequest> request = 0;
+                    requestList.push_back(request);
+                    ALOGE("A request is missing. Sending in null request.");
+                }
+            }
+
+            bool repeating = data.readInt32();
+
+            reply->writeNoException();
+            int64_t lastFrameNumber = -1;
+            reply->writeInt32(submitRequestList(requestList, repeating, &lastFrameNumber));
+            reply->writeInt32(1);
+            reply->writeInt64(lastFrameNumber);
 
             return NO_ERROR;
         } break;
@@ -251,7 +359,10 @@
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
             int requestId = data.readInt32();
             reply->writeNoException();
-            reply->writeInt32(cancelRequest(requestId));
+            int64_t lastFrameNumber = -1;
+            reply->writeInt32(cancelRequest(requestId, &lastFrameNumber));
+            reply->writeInt32(1);
+            reply->writeInt64(lastFrameNumber);
             return NO_ERROR;
         } break;
         case DELETE_STREAM: {
@@ -339,7 +450,10 @@
         case FLUSH: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
             reply->writeNoException();
-            reply->writeInt32(flush());
+            int64_t lastFrameNumber = -1;
+            reply->writeInt32(flush(&lastFrameNumber));
+            reply->writeInt32(1);
+            reply->writeInt64(lastFrameNumber);
             return NO_ERROR;
         }
         default:
diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk
index ec13911..61385e5 100644
--- a/camera/tests/Android.mk
+++ b/camera/tests/Android.mk
@@ -1,9 +1,24 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 LOCAL_PATH:= $(call my-dir)
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
 	main.cpp \
 	ProCameraTests.cpp \
+	VendorTagDescriptorTests.cpp
 
 LOCAL_SHARED_LIBRARIES := \
 	libutils \
@@ -26,6 +41,8 @@
 	external/gtest/include \
 	external/stlport/stlport \
 	system/media/camera/include \
+	system/media/private/camera/include \
+	system/media/camera/tests \
 	frameworks/av/services/camera/libcameraservice \
 	frameworks/av/include/camera \
 	frameworks/native/include \
diff --git a/camera/tests/VendorTagDescriptorTests.cpp b/camera/tests/VendorTagDescriptorTests.cpp
new file mode 100644
index 0000000..6624e79
--- /dev/null
+++ b/camera/tests/VendorTagDescriptorTests.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "VendorTagDescriptorTests"
+
+#include <binder/Parcel.h>
+#include <camera/VendorTagDescriptor.h>
+#include <camera_metadata_tests_fake_vendor.h>
+#include <camera_metadata_hidden.h>
+#include <system/camera_vendor_tags.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+
+#include <gtest/gtest.h>
+#include <stdint.h>
+
+using namespace android;
+
+enum {
+    BAD_TAG_ARRAY = 0xDEADBEEFu,
+    BAD_TAG = 0x8DEADBADu,
+};
+
+#define ARRAY_SIZE(a)      (sizeof(a) / sizeof((a)[0]))
+
+static bool ContainsTag(uint32_t* tagArray, size_t size, uint32_t tag) {
+    for (size_t i = 0; i < size; ++i) {
+        if (tag == tagArray[i]) return true;
+    }
+    return false;
+}
+
+#define EXPECT_CONTAINS_TAG(t, a) \
+    EXPECT_TRUE(ContainsTag(a, ARRAY_SIZE(a), t))
+
+#define ASSERT_NOT_NULL(x) \
+    ASSERT_TRUE((x) != NULL)
+
+extern "C" {
+
+static int default_get_tag_count(const vendor_tag_ops_t* vOps) {
+    return VENDOR_TAG_COUNT_ERR;
+}
+
+static void default_get_all_tags(const vendor_tag_ops_t* vOps, uint32_t* tagArray) {
+    //Noop
+}
+
+static const char* default_get_section_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+    return VENDOR_SECTION_NAME_ERR;
+}
+
+static const char* default_get_tag_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+    return VENDOR_TAG_NAME_ERR;
+}
+
+static int default_get_tag_type(const vendor_tag_ops_t* vOps, uint32_t tag) {
+    return VENDOR_TAG_TYPE_ERR;
+}
+
+} /*extern "C"*/
+
+// Set default vendor operations for a vendor_tag_ops struct
+static void FillWithDefaults(vendor_tag_ops_t* vOps) {
+    ASSERT_NOT_NULL(vOps);
+    vOps->get_tag_count = default_get_tag_count;
+    vOps->get_all_tags = default_get_all_tags;
+    vOps->get_section_name = default_get_section_name;
+    vOps->get_tag_name = default_get_tag_name;
+    vOps->get_tag_type = default_get_tag_type;
+}
+
+/**
+ * Test if values from VendorTagDescriptor methods match corresponding values
+ * from vendor_tag_ops functions.
+ */
+TEST(VendorTagDescriptorTest, ConsistentWithVendorTags) {
+    sp<VendorTagDescriptor> vDesc;
+    const vendor_tag_ops_t *vOps = &fakevendor_ops;
+    EXPECT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(vOps, /*out*/vDesc));
+
+    ASSERT_NOT_NULL(vDesc);
+
+    // Ensure reasonable tag count
+    int tagCount = vDesc->getTagCount();
+    EXPECT_EQ(tagCount, vOps->get_tag_count(vOps));
+
+    uint32_t descTagArray[tagCount];
+    uint32_t opsTagArray[tagCount];
+
+    // Get all tag ids
+    vDesc->getTagArray(descTagArray);
+    vOps->get_all_tags(vOps, opsTagArray);
+
+    ASSERT_NOT_NULL(descTagArray);
+    ASSERT_NOT_NULL(opsTagArray);
+
+    uint32_t tag;
+    for (int i = 0; i < tagCount; ++i) {
+        // For each tag id, check whether type, section name, tag name match
+        tag = descTagArray[i];
+        EXPECT_CONTAINS_TAG(tag, opsTagArray);
+        EXPECT_EQ(vDesc->getTagType(tag), vOps->get_tag_type(vOps, tag));
+        EXPECT_STREQ(vDesc->getSectionName(tag), vOps->get_section_name(vOps, tag));
+        EXPECT_STREQ(vDesc->getTagName(tag), vOps->get_tag_name(vOps, tag));
+    }
+}
+
+/**
+ * Test if values from VendorTagDescriptor methods stay consistent after being
+ * parcelled/unparcelled.
+ */
+TEST(VendorTagDescriptorTest, ConsistentAcrossParcel) {
+    sp<VendorTagDescriptor> vDescOriginal, vDescParceled;
+    const vendor_tag_ops_t *vOps = &fakevendor_ops;
+    EXPECT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(vOps, /*out*/vDescOriginal));
+
+    ASSERT_TRUE(vDescOriginal != NULL);
+
+    Parcel p;
+
+    // Check whether parcel read/write succeed
+    EXPECT_EQ(OK, vDescOriginal->writeToParcel(&p));
+    p.setDataPosition(0);
+    ASSERT_EQ(OK, VendorTagDescriptor::createFromParcel(&p, vDescParceled));
+
+    // Ensure consistent tag count
+    int tagCount = vDescOriginal->getTagCount();
+    ASSERT_EQ(tagCount, vDescParceled->getTagCount());
+
+    uint32_t descTagArray[tagCount];
+    uint32_t desc2TagArray[tagCount];
+
+    // Get all tag ids
+    vDescOriginal->getTagArray(descTagArray);
+    vDescParceled->getTagArray(desc2TagArray);
+
+    ASSERT_NOT_NULL(descTagArray);
+    ASSERT_NOT_NULL(desc2TagArray);
+
+    uint32_t tag;
+    for (int i = 0; i < tagCount; ++i) {
+        // For each tag id, check consistency between the two vendor tag
+        // descriptors for each type, section name, tag name
+        tag = descTagArray[i];
+        EXPECT_CONTAINS_TAG(tag, desc2TagArray);
+        EXPECT_EQ(vDescOriginal->getTagType(tag), vDescParceled->getTagType(tag));
+        EXPECT_STREQ(vDescOriginal->getSectionName(tag), vDescParceled->getSectionName(tag));
+        EXPECT_STREQ(vDescOriginal->getTagName(tag), vDescParceled->getTagName(tag));
+    }
+}
+
+/**
+ * Test defaults and error conditions.
+ */
+TEST(VendorTagDescriptorTest, ErrorConditions) {
+    sp<VendorTagDescriptor> vDesc;
+    vendor_tag_ops_t vOps;
+    FillWithDefaults(&vOps);
+
+    // Ensure create fails when using null vOps
+    EXPECT_EQ(BAD_VALUE, VendorTagDescriptor::createDescriptorFromOps(/*vOps*/NULL, vDesc));
+
+    // Ensure create works when there are no vtags defined in a well-formed vOps
+    ASSERT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(&vOps, vDesc));
+
+    // Ensure defaults are returned when no vtags are defined, or tag is unknown
+    EXPECT_EQ(VENDOR_TAG_COUNT_ERR, vDesc->getTagCount());
+    uint32_t* tagArray = reinterpret_cast<uint32_t*>(BAD_TAG_ARRAY);
+    uint32_t* testArray = tagArray;
+    vDesc->getTagArray(tagArray);
+    EXPECT_EQ(testArray, tagArray);
+    EXPECT_EQ(VENDOR_SECTION_NAME_ERR, vDesc->getSectionName(BAD_TAG));
+    EXPECT_EQ(VENDOR_TAG_NAME_ERR, vDesc->getTagName(BAD_TAG));
+    EXPECT_EQ(VENDOR_TAG_TYPE_ERR, vDesc->getTagType(BAD_TAG));
+
+    // Make sure global can be set/cleared
+    const vendor_tag_ops_t *fakeOps = &fakevendor_ops;
+    sp<VendorTagDescriptor> prevGlobal = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+    VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+
+    EXPECT_TRUE(VendorTagDescriptor::getGlobalVendorTagDescriptor() == NULL);
+    EXPECT_EQ(OK, VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc));
+    EXPECT_TRUE(VendorTagDescriptor::getGlobalVendorTagDescriptor() != NULL);
+    EXPECT_EQ(VENDOR_SECTION_NAME_ERR, vDesc->getSectionName(BAD_TAG));
+    EXPECT_EQ(OK, VendorTagDescriptor::setAsGlobalVendorTagDescriptor(prevGlobal));
+    EXPECT_EQ(prevGlobal, VendorTagDescriptor::getGlobalVendorTagDescriptor());
+}
+
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
index d77fdb6..6747e60 100644
--- a/cmds/screenrecord/Android.mk
+++ b/cmds/screenrecord/Android.mk
@@ -19,6 +19,7 @@
 LOCAL_SRC_FILES := \
 	screenrecord.cpp \
 	EglWindow.cpp \
+	FrameOutput.cpp \
 	TextRenderer.cpp \
 	Overlay.cpp \
 	Program.cpp
diff --git a/cmds/screenrecord/EglWindow.cpp b/cmds/screenrecord/EglWindow.cpp
index aa0517f..c16f2ad 100644
--- a/cmds/screenrecord/EglWindow.cpp
+++ b/cmds/screenrecord/EglWindow.cpp
@@ -35,11 +35,16 @@
 
 
 status_t EglWindow::createWindow(const sp<IGraphicBufferProducer>& surface) {
-    status_t err = eglSetupContext();
+    if (mEglSurface != EGL_NO_SURFACE) {
+        ALOGE("surface already created");
+        return UNKNOWN_ERROR;
+    }
+    status_t err = eglSetupContext(false);
     if (err != NO_ERROR) {
         return err;
     }
 
+    // Cache the current dimensions.  We're not expecting these to change.
     surface->query(NATIVE_WINDOW_WIDTH, &mWidth);
     surface->query(NATIVE_WINDOW_HEIGHT, &mHeight);
 
@@ -56,6 +61,34 @@
     return NO_ERROR;
 }
 
+status_t EglWindow::createPbuffer(int width, int height) {
+    if (mEglSurface != EGL_NO_SURFACE) {
+        ALOGE("surface already created");
+        return UNKNOWN_ERROR;
+    }
+    status_t err = eglSetupContext(true);
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    mWidth = width;
+    mHeight = height;
+
+    EGLint pbufferAttribs[] = {
+            EGL_WIDTH, width,
+            EGL_HEIGHT, height,
+            EGL_NONE
+    };
+    mEglSurface = eglCreatePbufferSurface(mEglDisplay, mEglConfig, pbufferAttribs);
+    if (mEglSurface == EGL_NO_SURFACE) {
+        ALOGE("eglCreatePbufferSurface error: %#x", eglGetError());
+        eglRelease();
+        return UNKNOWN_ERROR;
+    }
+
+    return NO_ERROR;
+}
+
 status_t EglWindow::makeCurrent() const {
     if (!eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, mEglContext)) {
         ALOGE("eglMakeCurrent failed: %#x", eglGetError());
@@ -64,7 +97,7 @@
     return NO_ERROR;
 }
 
-status_t EglWindow::eglSetupContext() {
+status_t EglWindow::eglSetupContext(bool forPbuffer) {
     EGLBoolean result;
 
     mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
@@ -82,17 +115,28 @@
     ALOGV("Initialized EGL v%d.%d", majorVersion, minorVersion);
 
     EGLint numConfigs = 0;
-    EGLint configAttribs[] = {
-        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
-        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
-        EGL_RECORDABLE_ANDROID, 1,
-        EGL_RED_SIZE, 8,
-        EGL_GREEN_SIZE, 8,
-        EGL_BLUE_SIZE, 8,
-        EGL_NONE
+    EGLint windowConfigAttribs[] = {
+            EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+            EGL_RECORDABLE_ANDROID, 1,
+            EGL_RED_SIZE, 8,
+            EGL_GREEN_SIZE, 8,
+            EGL_BLUE_SIZE, 8,
+            // no alpha
+            EGL_NONE
     };
-    result = eglChooseConfig(mEglDisplay, configAttribs, &mEglConfig, 1,
-            &numConfigs);
+    EGLint pbufferConfigAttribs[] = {
+            EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
+            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+            EGL_RED_SIZE, 8,
+            EGL_GREEN_SIZE, 8,
+            EGL_BLUE_SIZE, 8,
+            EGL_ALPHA_SIZE, 8,
+            EGL_NONE
+    };
+    result = eglChooseConfig(mEglDisplay,
+            forPbuffer ? pbufferConfigAttribs : windowConfigAttribs,
+            &mEglConfig, 1, &numConfigs);
     if (result != EGL_TRUE) {
         ALOGE("eglChooseConfig error: %#x", eglGetError());
         return UNKNOWN_ERROR;
diff --git a/cmds/screenrecord/EglWindow.h b/cmds/screenrecord/EglWindow.h
index 02a2efc..69d0c31 100644
--- a/cmds/screenrecord/EglWindow.h
+++ b/cmds/screenrecord/EglWindow.h
@@ -44,6 +44,9 @@
     // Creates an EGL window for the supplied surface.
     status_t createWindow(const sp<IGraphicBufferProducer>& surface);
 
+    // Creates an EGL pbuffer surface.
+    status_t createPbuffer(int width, int height);
+
     // Return width and height values (obtained from IGBP).
     int getWidth() const { return mWidth; }
     int getHeight() const { return mHeight; }
@@ -65,7 +68,7 @@
     EglWindow& operator=(const EglWindow&);
 
     // Init display, create config and context.
-    status_t eglSetupContext();
+    status_t eglSetupContext(bool forPbuffer);
     void eglRelease();
 
     // Basic EGL goodies.
diff --git a/cmds/screenrecord/FrameOutput.cpp b/cmds/screenrecord/FrameOutput.cpp
new file mode 100644
index 0000000..06b1f70
--- /dev/null
+++ b/cmds/screenrecord/FrameOutput.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ScreenRecord"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "FrameOutput.h"
+
+using namespace android;
+
+static const bool kShowTiming = false;      // set to "true" for debugging
+static const int kGlBytesPerPixel = 4;      // GL_RGBA
+static const int kOutBytesPerPixel = 3;     // RGB only
+
+inline void FrameOutput::setValueLE(uint8_t* buf, uint32_t value) {
+    // Since we're running on an Android device, we're (almost) guaranteed
+    // to be little-endian, and (almost) guaranteed that unaligned 32-bit
+    // writes will work without any performance penalty... but do it
+    // byte-by-byte anyway.
+    buf[0] = (uint8_t) value;
+    buf[1] = (uint8_t) (value >> 8);
+    buf[2] = (uint8_t) (value >> 16);
+    buf[3] = (uint8_t) (value >> 24);
+}
+
+status_t FrameOutput::createInputSurface(int width, int height,
+        sp<IGraphicBufferProducer>* pBufferProducer) {
+    status_t err;
+
+    err = mEglWindow.createPbuffer(width, height);
+    if (err != NO_ERROR) {
+        return err;
+    }
+    mEglWindow.makeCurrent();
+
+    glViewport(0, 0, width, height);
+    glDisable(GL_DEPTH_TEST);
+    glDisable(GL_CULL_FACE);
+
+    // Shader for rendering the external texture.
+    err = mExtTexProgram.setup(Program::PROGRAM_EXTERNAL_TEXTURE);
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    // Input side (buffers from virtual display).
+    glGenTextures(1, &mExtTextureName);
+    if (mExtTextureName == 0) {
+        ALOGE("glGenTextures failed: %#x", glGetError());
+        return UNKNOWN_ERROR;
+    }
+
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    mGlConsumer = new GLConsumer(consumer, mExtTextureName,
+                GL_TEXTURE_EXTERNAL_OES);
+    mGlConsumer->setName(String8("virtual display"));
+    mGlConsumer->setDefaultBufferSize(width, height);
+    mGlConsumer->setDefaultMaxBufferCount(5);
+    mGlConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_TEXTURE);
+
+    mGlConsumer->setFrameAvailableListener(this);
+
+    mPixelBuf = new uint8_t[width * height * kGlBytesPerPixel];
+
+    *pBufferProducer = producer;
+
+    ALOGD("FrameOutput::createInputSurface OK");
+    return NO_ERROR;
+}
+
+status_t FrameOutput::copyFrame(FILE* fp, long timeoutUsec) {
+    Mutex::Autolock _l(mMutex);
+    ALOGV("copyFrame %ld\n", timeoutUsec);
+
+    if (!mFrameAvailable) {
+        nsecs_t timeoutNsec = (nsecs_t)timeoutUsec * 1000;
+        int cc = mEventCond.waitRelative(mMutex, timeoutNsec);
+        if (cc == -ETIMEDOUT) {
+            ALOGV("cond wait timed out");
+            return ETIMEDOUT;
+        } else if (cc != 0) {
+            ALOGW("cond wait returned error %d", cc);
+            return cc;
+        }
+    }
+    if (!mFrameAvailable) {
+        // This happens when Ctrl-C is hit.  Apparently POSIX says that the
+        // pthread wait call doesn't return EINTR, treating this instead as
+        // an instance of a "spurious wakeup".  We didn't get a frame, so
+        // we just treat it as a timeout.
+        return ETIMEDOUT;
+    }
+
+    // A frame is available.  Clear the flag for the next round.
+    mFrameAvailable = false;
+
+    float texMatrix[16];
+    mGlConsumer->updateTexImage();
+    mGlConsumer->getTransformMatrix(texMatrix);
+
+    // The data is in an external texture, so we need to render it to the
+    // pbuffer to get access to RGB pixel data.  We also want to flip it
+    // upside-down for easy conversion to a bitmap.
+    int width = mEglWindow.getWidth();
+    int height = mEglWindow.getHeight();
+    status_t err = mExtTexProgram.blit(mExtTextureName, texMatrix, 0, 0,
+            width, height, true);
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    // GLES only guarantees that glReadPixels() will work with GL_RGBA, so we
+    // need to get 4 bytes/pixel and reduce it.  Depending on the size of the
+    // screen and the device capabilities, this can take a while.
+    int64_t startWhenNsec, pixWhenNsec, endWhenNsec;
+    if (kShowTiming) {
+        startWhenNsec = systemTime(CLOCK_MONOTONIC);
+    }
+    GLenum glErr;
+    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, mPixelBuf);
+    if ((glErr = glGetError()) != GL_NO_ERROR) {
+        ALOGE("glReadPixels failed: %#x", glErr);
+        return UNKNOWN_ERROR;
+    }
+    if (kShowTiming) {
+        pixWhenNsec = systemTime(CLOCK_MONOTONIC);
+    }
+    reduceRgbaToRgb(mPixelBuf, width * height);
+    if (kShowTiming) {
+        endWhenNsec = systemTime(CLOCK_MONOTONIC);
+        ALOGD("got pixels (get=%.3f ms, reduce=%.3fms)",
+                (pixWhenNsec - startWhenNsec) / 1000000.0,
+                (endWhenNsec - pixWhenNsec) / 1000000.0);
+    }
+
+    // Fill out the header.
+    size_t headerLen = sizeof(uint32_t) * 5;
+    size_t rgbDataLen = width * height * kOutBytesPerPixel;
+    size_t packetLen = headerLen - sizeof(uint32_t) + rgbDataLen;
+    uint8_t header[headerLen];
+    setValueLE(&header[0], packetLen);
+    setValueLE(&header[4], width);
+    setValueLE(&header[8], height);
+    setValueLE(&header[12], width * kOutBytesPerPixel);
+    setValueLE(&header[16], HAL_PIXEL_FORMAT_RGB_888);
+
+    // Currently using buffered I/O rather than writev().  Not expecting it
+    // to make much of a difference, but it might be worth a test for larger
+    // frame sizes.
+    if (kShowTiming) {
+        startWhenNsec = systemTime(CLOCK_MONOTONIC);
+    }
+    fwrite(header, 1, headerLen, fp);
+    fwrite(mPixelBuf, 1, rgbDataLen, fp);
+    fflush(fp);
+    if (kShowTiming) {
+        endWhenNsec = systemTime(CLOCK_MONOTONIC);
+        ALOGD("wrote pixels (%.3f ms)",
+                (endWhenNsec - startWhenNsec) / 1000000.0);
+    }
+
+    if (ferror(fp)) {
+        // errno may not be useful; log it anyway
+        ALOGE("write failed (errno=%d)", errno);
+        return UNKNOWN_ERROR;
+    }
+
+    return NO_ERROR;
+}
+
+void FrameOutput::reduceRgbaToRgb(uint8_t* buf, unsigned int pixelCount) {
+    // Convert RGBA to RGB.
+    //
+    // Unaligned 32-bit accesses are allowed on ARM, so we could do this
+    // with 32-bit copies advancing at different rates (taking care at the
+    // end to not go one byte over).
+    const uint8_t* readPtr = buf;
+    for (unsigned int i = 0; i < pixelCount; i++) {
+        *buf++ = *readPtr++;
+        *buf++ = *readPtr++;
+        *buf++ = *readPtr++;
+        readPtr++;
+    }
+}
+
+// Callback; executes on arbitrary thread.
+void FrameOutput::onFrameAvailable() {
+    Mutex::Autolock _l(mMutex);
+    mFrameAvailable = true;
+    mEventCond.signal();
+}
diff --git a/cmds/screenrecord/FrameOutput.h b/cmds/screenrecord/FrameOutput.h
new file mode 100644
index 0000000..c1148d0
--- /dev/null
+++ b/cmds/screenrecord/FrameOutput.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SCREENRECORD_FRAMEOUTPUT_H
+#define SCREENRECORD_FRAMEOUTPUT_H
+
+#include "Program.h"
+#include "EglWindow.h"
+
+#include <gui/BufferQueue.h>
+#include <gui/GLConsumer.h>
+
+namespace android {
+
+/*
+ * Support for "frames" output format.
+ */
+class FrameOutput : public GLConsumer::FrameAvailableListener {
+public:
+    FrameOutput() : mFrameAvailable(false),
+        mExtTextureName(0),
+        mPixelBuf(NULL)
+        {}
+
+    // Create an "input surface", similar in purpose to a MediaCodec input
+    // surface, that the virtual display can send buffers to.  Also configures
+    // EGL with a pbuffer surface on the current thread.
+    status_t createInputSurface(int width, int height,
+            sp<IGraphicBufferProducer>* pBufferProducer);
+
+    // Copy one from input to output.  If no frame is available, this will wait up to the
+    // specified number of microseconds.
+    //
+    // Returns ETIMEDOUT if the timeout expired before we found a frame.
+    status_t copyFrame(FILE* fp, long timeoutUsec);
+
+    // Prepare to copy frames.  Makes the EGL context used by this object current.
+    void prepareToCopy() {
+        mEglWindow.makeCurrent();
+    }
+
+private:
+    FrameOutput(const FrameOutput&);
+    FrameOutput& operator=(const FrameOutput&);
+
+    // Destruction via RefBase.
+    virtual ~FrameOutput() {
+        delete[] mPixelBuf;
+    }
+
+    // (overrides GLConsumer::FrameAvailableListener method)
+    virtual void onFrameAvailable();
+
+    // Reduces RGBA to RGB, in place.
+    static void reduceRgbaToRgb(uint8_t* buf, unsigned int pixelCount);
+
+    // Put a 32-bit value into a buffer, in little-endian byte order.
+    static void setValueLE(uint8_t* buf, uint32_t value);
+
+    // Used to wait for the FrameAvailableListener callback.
+    Mutex mMutex;
+    Condition mEventCond;
+
+    // Set by the FrameAvailableListener callback.
+    bool mFrameAvailable;
+
+    // This receives frames from the virtual display and makes them available
+    // as an external texture.
+    sp<GLConsumer> mGlConsumer;
+
+    // EGL display / context / surface.
+    EglWindow mEglWindow;
+
+    // GL rendering support.
+    Program mExtTexProgram;
+
+    // External texture, updated by GLConsumer.
+    GLuint mExtTextureName;
+
+    // Pixel data buffer.
+    uint8_t* mPixelBuf;
+};
+
+}; // namespace android
+
+#endif /*SCREENRECORD_FRAMEOUTPUT_H*/
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index 2e98874..94f560d 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -84,7 +84,7 @@
     assert(mState == RUNNING);
 
     ALOGV("Overlay::start successful");
-    *pBufferProducer = mBufferQueue;
+    *pBufferProducer = mProducer;
     return NO_ERROR;
 }
 
@@ -169,8 +169,9 @@
         return UNKNOWN_ERROR;
     }
 
-    mBufferQueue = new BufferQueue(/*new GraphicBufferAlloc()*/);
-    mGlConsumer = new GLConsumer(mBufferQueue, mExtTextureName,
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&mProducer, &consumer);
+    mGlConsumer = new GLConsumer(consumer, mExtTextureName,
                 GL_TEXTURE_EXTERNAL_OES);
     mGlConsumer->setName(String8("virtual display"));
     mGlConsumer->setDefaultBufferSize(width, height);
@@ -187,7 +188,7 @@
     ALOGV("Overlay::release_l");
     mOutputSurface.clear();
     mGlConsumer.clear();
-    mBufferQueue.clear();
+    mProducer.clear();
 
     mTexProgram.release();
     mExtTexProgram.release();
diff --git a/cmds/screenrecord/Overlay.h b/cmds/screenrecord/Overlay.h
index b8473b4..b1b5c29 100644
--- a/cmds/screenrecord/Overlay.h
+++ b/cmds/screenrecord/Overlay.h
@@ -47,7 +47,6 @@
         mLastFrameNumber(-1),
         mTotalDroppedFrames(0)
         {}
-    virtual ~Overlay() { assert(mState == UNINITIALIZED || mState == STOPPED); }
 
     // Creates a thread that performs the overlay.  Pass in the surface that
     // output will be sent to.
@@ -71,6 +70,9 @@
     Overlay(const Overlay&);
     Overlay& operator=(const Overlay&);
 
+    // Destruction via RefBase.
+    virtual ~Overlay() { assert(mState == UNINITIALIZED || mState == STOPPED); }
+
     // Draw the initial info screen.
     static void doDrawInfoPage(const EglWindow& window,
             const Program& texRender, TextRenderer& textRenderer);
@@ -120,9 +122,9 @@
     // surface.
     sp<IGraphicBufferProducer> mOutputSurface;
 
-    // Our queue.  The producer side is passed to the virtual display, the
-    // consumer side feeds into our GLConsumer.
-    sp<BufferQueue> mBufferQueue;
+    // Producer side of queue, passed into the virtual display.
+    // The consumer end feeds into our GLConsumer.
+    sp<IGraphicBufferProducer> mProducer;
 
     // This receives frames from the virtual display and makes them available
     // as an external texture.
diff --git a/cmds/screenrecord/Program.cpp b/cmds/screenrecord/Program.cpp
index a198204..73cae6e 100644
--- a/cmds/screenrecord/Program.cpp
+++ b/cmds/screenrecord/Program.cpp
@@ -201,7 +201,7 @@
 
 
 status_t Program::blit(GLuint texName, const float* texMatrix,
-        int32_t x, int32_t y, int32_t w, int32_t h) const {
+        int32_t x, int32_t y, int32_t w, int32_t h, bool invert) const {
     ALOGV("Program::blit %d xy=%d,%d wh=%d,%d", texName, x, y, w, h);
 
     const float pos[] = {
@@ -218,7 +218,7 @@
     };
     status_t err;
 
-    err = beforeDraw(texName, texMatrix, pos, uv);
+    err = beforeDraw(texName, texMatrix, pos, uv, invert);
     if (err == NO_ERROR) {
         glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
         err = afterDraw();
@@ -232,7 +232,7 @@
 
     status_t err;
 
-    err = beforeDraw(texName, texMatrix, vertices, texes);
+    err = beforeDraw(texName, texMatrix, vertices, texes, false);
     if (err == NO_ERROR) {
         glDrawArrays(GL_TRIANGLES, 0, count);
         err = afterDraw();
@@ -241,7 +241,7 @@
 }
 
 status_t Program::beforeDraw(GLuint texName, const float* texMatrix,
-        const float* vertices, const float* texes) const {
+        const float* vertices, const float* texes, bool invert) const {
     // Create an orthographic projection matrix based on viewport size.
     GLint vp[4];
     glGetIntegerv(GL_VIEWPORT, vp);
@@ -251,6 +251,10 @@
         0.0f,               0.0f,               1.0f,   0.0f,
         -1.0f,              1.0f,               0.0f,   1.0f,
     };
+    if (invert) {
+        screenToNdc[5] = -screenToNdc[5];
+        screenToNdc[13] = -screenToNdc[13];
+    }
 
     glUseProgram(mProgram);
 
diff --git a/cmds/screenrecord/Program.h b/cmds/screenrecord/Program.h
index e47bc0d..558be8d 100644
--- a/cmds/screenrecord/Program.h
+++ b/cmds/screenrecord/Program.h
@@ -51,9 +51,11 @@
     // Release the program and associated resources.
     void release();
 
-    // Blit the specified texture to { x, y, x+w, y+h }.
+    // Blit the specified texture to { x, y, x+w, y+h }.  Inverts the
+    // content if "invert" is set.
     status_t blit(GLuint texName, const float* texMatrix,
-            int32_t x, int32_t y, int32_t w, int32_t h) const;
+            int32_t x, int32_t y, int32_t w, int32_t h,
+            bool invert = false) const;
 
     // Draw a number of triangles.
     status_t drawTriangles(GLuint texName, const float* texMatrix,
@@ -67,7 +69,7 @@
 
     // Common code for draw functions.
     status_t beforeDraw(GLuint texName, const float* texMatrix,
-            const float* vertices, const float* texes) const;
+            const float* vertices, const float* texes, bool invert) const;
     status_t afterDraw() const;
 
     // GLES 2 shader utilities.
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 61f83e3..a17fc51 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -45,10 +45,12 @@
 #include <signal.h>
 #include <getopt.h>
 #include <sys/wait.h>
+#include <termios.h>
 #include <assert.h>
 
 #include "screenrecord.h"
 #include "Overlay.h"
+#include "FrameOutput.h"
 
 using namespace android;
 
@@ -57,10 +59,14 @@
 static const uint32_t kMaxTimeLimitSec = 180;       // 3 minutes
 static const uint32_t kFallbackWidth = 1280;        // 720p
 static const uint32_t kFallbackHeight = 720;
+static const char* kMimeTypeAvc = "video/avc";
 
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
 static bool gRotate = false;            // rotate 90 degrees
+static enum {
+    FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES
+} gOutputFormat = FORMAT_MP4;           // data format for output
 static bool gSizeSpecified = false;     // was size explicitly requested?
 static bool gWantInfoScreen = false;    // do we want initial info screen?
 static bool gWantFrameTime = false;     // do we want times on each frame?
@@ -140,14 +146,14 @@
     status_t err;
 
     if (gVerbose) {
-        printf("Configuring recorder for %dx%d video at %.2fMbps\n",
-                gVideoWidth, gVideoHeight, gBitRate / 1000000.0);
+        printf("Configuring recorder for %dx%d %s at %.2fMbps\n",
+                gVideoWidth, gVideoHeight, kMimeTypeAvc, gBitRate / 1000000.0);
     }
 
     sp<AMessage> format = new AMessage;
     format->setInt32("width", gVideoWidth);
     format->setInt32("height", gVideoHeight);
-    format->setString("mime", "video/avc");
+    format->setString("mime", kMimeTypeAvc);
     format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
     format->setInt32("bitrate", gBitRate);
     format->setFloat("frame-rate", displayFps);
@@ -157,16 +163,18 @@
     looper->setName("screenrecord_looper");
     looper->start();
     ALOGV("Creating codec");
-    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", true);
+    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, kMimeTypeAvc, true);
     if (codec == NULL) {
-        fprintf(stderr, "ERROR: unable to create video/avc codec instance\n");
+        fprintf(stderr, "ERROR: unable to create %s codec instance\n",
+                kMimeTypeAvc);
         return UNKNOWN_ERROR;
     }
 
     err = codec->configure(format, NULL, NULL,
             MediaCodec::CONFIGURE_FLAG_ENCODE);
     if (err != NO_ERROR) {
-        fprintf(stderr, "ERROR: unable to configure codec (err=%d)\n", err);
+        fprintf(stderr, "ERROR: unable to configure %s codec at %dx%d (err=%d)\n",
+                kMimeTypeAvc, gVideoWidth, gVideoHeight, err);
         codec->release();
         return err;
     }
@@ -298,10 +306,12 @@
  * input frames are coming from the virtual display as fast as SurfaceFlinger
  * wants to send them.
  *
+ * Exactly one of muxer or rawFp must be non-null.
+ *
  * The muxer must *not* have been started before calling.
  */
 static status_t runEncoder(const sp<MediaCodec>& encoder,
-        const sp<MediaMuxer>& muxer, const sp<IBinder>& mainDpy,
+        const sp<MediaMuxer>& muxer, FILE* rawFp, const sp<IBinder>& mainDpy,
         const sp<IBinder>& virtualDpy, uint8_t orientation) {
     static int kTimeout = 250000;   // be responsive on signal
     status_t err;
@@ -311,6 +321,8 @@
     int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
     DisplayInfo mainDpyInfo;
 
+    assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
+
     Vector<sp<ABuffer> > buffers;
     err = encoder->getOutputBuffers(&buffers);
     if (err != NO_ERROR) {
@@ -342,15 +354,16 @@
         case NO_ERROR:
             // got a buffer
             if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) {
-                // ignore this -- we passed the CSD into MediaMuxer when
-                // we got the format change notification
-                ALOGV("Got codec config buffer (%u bytes); ignoring", size);
-                size = 0;
+                ALOGV("Got codec config buffer (%u bytes)", size);
+                if (muxer != NULL) {
+                    // ignore this -- we passed the CSD into MediaMuxer when
+                    // we got the format change notification
+                    size = 0;
+                }
             }
             if (size != 0) {
                 ALOGV("Got data in buffer %d, size=%d, pts=%lld",
                         bufIndex, size, ptsUsec);
-                assert(trackIdx != -1);
 
                 { // scope
                     ATRACE_NAME("orientation");
@@ -379,14 +392,23 @@
                     ptsUsec = systemTime(SYSTEM_TIME_MONOTONIC) / 1000;
                 }
 
-                // The MediaMuxer docs are unclear, but it appears that we
-                // need to pass either the full set of BufferInfo flags, or
-                // (flags & BUFFER_FLAG_SYNCFRAME).
-                //
-                // If this blocks for too long we could drop frames.  We may
-                // want to queue these up and do them on a different thread.
-                { // scope
+                if (muxer == NULL) {
+                    fwrite(buffers[bufIndex]->data(), 1, size, rawFp);
+                    // Flush the data immediately in case we're streaming.
+                    // We don't want to do this if all we've written is
+                    // the SPS/PPS data because mplayer gets confused.
+                    if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) == 0) {
+                        fflush(rawFp);
+                    }
+                } else {
+                    // The MediaMuxer docs are unclear, but it appears that we
+                    // need to pass either the full set of BufferInfo flags, or
+                    // (flags & BUFFER_FLAG_SYNCFRAME).
+                    //
+                    // If this blocks for too long we could drop frames.  We may
+                    // want to queue these up and do them on a different thread.
                     ATRACE_NAME("write sample");
+                    assert(trackIdx != -1);
                     err = muxer->writeSampleData(buffers[bufIndex], trackIdx,
                             ptsUsec, flags);
                     if (err != NO_ERROR) {
@@ -418,12 +440,14 @@
                 ALOGV("Encoder format changed");
                 sp<AMessage> newFormat;
                 encoder->getOutputFormat(&newFormat);
-                trackIdx = muxer->addTrack(newFormat);
-                ALOGV("Starting muxer");
-                err = muxer->start();
-                if (err != NO_ERROR) {
-                    fprintf(stderr, "Unable to start muxer (err=%d)\n", err);
-                    return err;
+                if (muxer != NULL) {
+                    trackIdx = muxer->addTrack(newFormat);
+                    ALOGV("Starting muxer");
+                    err = muxer->start();
+                    if (err != NO_ERROR) {
+                        fprintf(stderr, "Unable to start muxer (err=%d)\n", err);
+                        return err;
+                    }
                 }
             }
             break;
@@ -457,7 +481,45 @@
 }
 
 /*
- * Main "do work" method.
+ * Raw H.264 byte stream output requested.  Send the output to stdout
+ * if desired.  If the output is a tty, reconfigure it to avoid the
+ * CRLF line termination that we see with "adb shell" commands.
+ */
+static FILE* prepareRawOutput(const char* fileName) {
+    FILE* rawFp = NULL;
+
+    if (strcmp(fileName, "-") == 0) {
+        if (gVerbose) {
+            fprintf(stderr, "ERROR: verbose output and '-' not compatible");
+            return NULL;
+        }
+        rawFp = stdout;
+    } else {
+        rawFp = fopen(fileName, "w");
+        if (rawFp == NULL) {
+            fprintf(stderr, "fopen raw failed: %s\n", strerror(errno));
+            return NULL;
+        }
+    }
+
+    int fd = fileno(rawFp);
+    if (isatty(fd)) {
+        // best effort -- reconfigure tty for "raw"
+        ALOGD("raw video output to tty (fd=%d)", fd);
+        struct termios term;
+        if (tcgetattr(fd, &term) == 0) {
+            cfmakeraw(&term);
+            if (tcsetattr(fd, TCSANOW, &term) == 0) {
+                ALOGD("tty successfully configured for raw");
+            }
+        }
+    }
+
+    return rawFp;
+}
+
+/*
+ * Main "do work" start point.
  *
  * Configures codec, muxer, and virtual display, then starts moving bits
  * around.
@@ -499,30 +561,40 @@
 
     // Configure and start the encoder.
     sp<MediaCodec> encoder;
+    sp<FrameOutput> frameOutput;
     sp<IGraphicBufferProducer> encoderInputSurface;
-    err = prepareEncoder(mainDpyInfo.fps, &encoder, &encoderInputSurface);
+    if (gOutputFormat != FORMAT_FRAMES) {
+        err = prepareEncoder(mainDpyInfo.fps, &encoder, &encoderInputSurface);
 
-    if (err != NO_ERROR && !gSizeSpecified) {
-        // fallback is defined for landscape; swap if we're in portrait
-        bool needSwap = gVideoWidth < gVideoHeight;
-        uint32_t newWidth = needSwap ? kFallbackHeight : kFallbackWidth;
-        uint32_t newHeight = needSwap ? kFallbackWidth : kFallbackHeight;
-        if (gVideoWidth != newWidth && gVideoHeight != newHeight) {
-            ALOGV("Retrying with 720p");
-            fprintf(stderr, "WARNING: failed at %dx%d, retrying at %dx%d\n",
-                    gVideoWidth, gVideoHeight, newWidth, newHeight);
-            gVideoWidth = newWidth;
-            gVideoHeight = newHeight;
-            err = prepareEncoder(mainDpyInfo.fps, &encoder,
-                    &encoderInputSurface);
+        if (err != NO_ERROR && !gSizeSpecified) {
+            // fallback is defined for landscape; swap if we're in portrait
+            bool needSwap = gVideoWidth < gVideoHeight;
+            uint32_t newWidth = needSwap ? kFallbackHeight : kFallbackWidth;
+            uint32_t newHeight = needSwap ? kFallbackWidth : kFallbackHeight;
+            if (gVideoWidth != newWidth && gVideoHeight != newHeight) {
+                ALOGV("Retrying with 720p");
+                fprintf(stderr, "WARNING: failed at %dx%d, retrying at %dx%d\n",
+                        gVideoWidth, gVideoHeight, newWidth, newHeight);
+                gVideoWidth = newWidth;
+                gVideoHeight = newHeight;
+                err = prepareEncoder(mainDpyInfo.fps, &encoder,
+                        &encoderInputSurface);
+            }
+        }
+        if (err != NO_ERROR) return err;
+
+        // From here on, we must explicitly release() the encoder before it goes
+        // out of scope, or we will get an assertion failure from stagefright
+        // later on in a different thread.
+    } else {
+        // We're not using an encoder at all.  The "encoder input surface" we hand to
+        // SurfaceFlinger will just feed directly to us.
+        frameOutput = new FrameOutput();
+        err = frameOutput->createInputSurface(gVideoWidth, gVideoHeight, &encoderInputSurface);
+        if (err != NO_ERROR) {
+            return err;
         }
     }
-    if (err != NO_ERROR) return err;
-
-    // From here on, we must explicitly release() the encoder before it goes
-    // out of scope, or we will get an assertion failure from stagefright
-    // later on in a different thread.
-
 
     // Draw the "info" page by rendering a frame with GLES and sending
     // it directly to the encoder.
@@ -539,7 +611,7 @@
         overlay = new Overlay();
         err = overlay->start(encoderInputSurface, &bufferProducer);
         if (err != NO_ERROR) {
-            encoder->release();
+            if (encoder != NULL) encoder->release();
             return err;
         }
         if (gVerbose) {
@@ -554,40 +626,91 @@
     sp<IBinder> dpy;
     err = prepareVirtualDisplay(mainDpyInfo, bufferProducer, &dpy);
     if (err != NO_ERROR) {
-        encoder->release();
+        if (encoder != NULL) encoder->release();
         return err;
     }
 
-    // Configure muxer.  We have to wait for the CSD blob from the encoder
-    // before we can start it.
-    sp<MediaMuxer> muxer = new MediaMuxer(fileName,
-            MediaMuxer::OUTPUT_FORMAT_MPEG_4);
-    if (gRotate) {
-        muxer->setOrientationHint(90);  // TODO: does this do anything?
+    sp<MediaMuxer> muxer = NULL;
+    FILE* rawFp = NULL;
+    switch (gOutputFormat) {
+        case FORMAT_MP4: {
+            // Configure muxer.  We have to wait for the CSD blob from the encoder
+            // before we can start it.
+            muxer = new MediaMuxer(fileName, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+            if (gRotate) {
+                muxer->setOrientationHint(90);  // TODO: does this do anything?
+            }
+            break;
+        }
+        case FORMAT_H264:
+        case FORMAT_FRAMES: {
+            rawFp = prepareRawOutput(fileName);
+            if (rawFp == NULL) {
+                if (encoder != NULL) encoder->release();
+                return -1;
+            }
+            break;
+        }
+        default:
+            fprintf(stderr, "ERROR: unknown format %d\n", gOutputFormat);
+            abort();
     }
 
-    // Main encoder loop.
-    err = runEncoder(encoder, muxer, mainDpy, dpy, mainDpyInfo.orientation);
-    if (err != NO_ERROR) {
-        fprintf(stderr, "Encoder failed (err=%d)\n", err);
-        // fall through to cleanup
-    }
+    if (gOutputFormat == FORMAT_FRAMES) {
+        // TODO: if we want to make this a proper feature, we should output
+        //       an outer header with version info.  Right now we never change
+        //       the frame size or format, so we could conceivably just send
+        //       the current frame header once and then follow it with an
+        //       unbroken stream of data.
 
-    if (gVerbose) {
-        printf("Stopping encoder and muxer\n");
+        // Make the EGL context current again.  This gets unhooked if we're
+        // using "--bugreport" mode.
+        // TODO: figure out if we can eliminate this
+        frameOutput->prepareToCopy();
+
+        while (!gStopRequested) {
+            // Poll for frames, the same way we do for MediaCodec.  We do
+            // all of the work on the main thread.
+            //
+            // Ideally we'd sleep indefinitely and wake when the
+            // stop was requested, but this will do for now.  (It almost
+            // works because wait() wakes when a signal hits, but we
+            // need to handle the edge cases.)
+            err = frameOutput->copyFrame(rawFp, 250000);
+            if (err == ETIMEDOUT) {
+                err = NO_ERROR;
+            } else if (err != NO_ERROR) {
+                ALOGE("Got error %d from copyFrame()", err);
+                break;
+            }
+        }
+    } else {
+        // Main encoder loop.
+        err = runEncoder(encoder, muxer, rawFp, mainDpy, dpy,
+                mainDpyInfo.orientation);
+        if (err != NO_ERROR) {
+            fprintf(stderr, "Encoder failed (err=%d)\n", err);
+            // fall through to cleanup
+        }
+
+        if (gVerbose) {
+            printf("Stopping encoder and muxer\n");
+        }
     }
 
     // Shut everything down, starting with the producer side.
     encoderInputSurface = NULL;
     SurfaceComposerClient::destroyDisplay(dpy);
-    if (overlay != NULL) {
-        overlay->stop();
+    if (overlay != NULL) overlay->stop();
+    if (encoder != NULL) encoder->stop();
+    if (muxer != NULL) {
+        // If we don't stop muxer explicitly, i.e. let the destructor run,
+        // it may hang (b/11050628).
+        muxer->stop();
+    } else if (rawFp != stdout) {
+        fclose(rawFp);
     }
-    encoder->stop();
-    // If we don't stop muxer explicitly, i.e. let the destructor run,
-    // it may hang (b/11050628).
-    muxer->stop();
-    encoder->release();
+    if (encoder != NULL) encoder->release();
 
     return err;
 }
@@ -749,10 +872,12 @@
         { "size",               required_argument,  NULL, 's' },
         { "bit-rate",           required_argument,  NULL, 'b' },
         { "time-limit",         required_argument,  NULL, 't' },
+        { "bugreport",          no_argument,        NULL, 'u' },
+        // "unofficial" options
         { "show-device-info",   no_argument,        NULL, 'i' },
         { "show-frame-time",    no_argument,        NULL, 'f' },
-        { "bugreport",          no_argument,        NULL, 'u' },
         { "rotate",             no_argument,        NULL, 'r' },
+        { "output-format",      required_argument,  NULL, 'o' },
         { NULL,                 0,                  NULL, 0 }
     };
 
@@ -804,20 +929,32 @@
                 return 2;
             }
             break;
+        case 'u':
+            gWantInfoScreen = true;
+            gWantFrameTime = true;
+            break;
         case 'i':
             gWantInfoScreen = true;
             break;
         case 'f':
             gWantFrameTime = true;
             break;
-        case 'u':
-            gWantInfoScreen = true;
-            gWantFrameTime = true;
-            break;
         case 'r':
             // experimental feature
             gRotate = true;
             break;
+        case 'o':
+            if (strcmp(optarg, "mp4") == 0) {
+                gOutputFormat = FORMAT_MP4;
+            } else if (strcmp(optarg, "h264") == 0) {
+                gOutputFormat = FORMAT_H264;
+            } else if (strcmp(optarg, "frames") == 0) {
+                gOutputFormat = FORMAT_FRAMES;
+            } else {
+                fprintf(stderr, "Unknown format '%s'\n", optarg);
+                return 2;
+            }
+            break;
         default:
             if (ic != '?') {
                 fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
@@ -831,17 +968,19 @@
         return 2;
     }
 
-    // MediaMuxer tries to create the file in the constructor, but we don't
-    // learn about the failure until muxer.start(), which returns a generic
-    // error code without logging anything.  We attempt to create the file
-    // now for better diagnostics.
     const char* fileName = argv[optind];
-    int fd = open(fileName, O_CREAT | O_RDWR, 0644);
-    if (fd < 0) {
-        fprintf(stderr, "Unable to open '%s': %s\n", fileName, strerror(errno));
-        return 1;
+    if (gOutputFormat == FORMAT_MP4) {
+        // MediaMuxer tries to create the file in the constructor, but we don't
+        // learn about the failure until muxer.start(), which returns a generic
+        // error code without logging anything.  We attempt to create the file
+        // now for better diagnostics.
+        int fd = open(fileName, O_CREAT | O_RDWR, 0644);
+        if (fd < 0) {
+            fprintf(stderr, "Unable to open '%s': %s\n", fileName, strerror(errno));
+            return 1;
+        }
+        close(fd);
     }
-    close(fd);
 
     status_t err = recordScreen(fileName);
     if (err == NO_ERROR) {
diff --git a/cmds/screenrecord/screenrecord.h b/cmds/screenrecord/screenrecord.h
index 95e8a68..9b058c2 100644
--- a/cmds/screenrecord/screenrecord.h
+++ b/cmds/screenrecord/screenrecord.h
@@ -18,6 +18,6 @@
 #define SCREENRECORD_SCREENRECORD_H
 
 #define kVersionMajor 1
-#define kVersionMinor 1
+#define kVersionMinor 2
 
 #endif /*SCREENRECORD_SCREENRECORD_H*/
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 5d2d721..1b2f792 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -23,6 +23,7 @@
 #include <gui/Surface.h>
 #include <media/AudioTrack.h>
 #include <media/ICrypto.h>
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -275,7 +276,8 @@
 
     mExtractor = new NuMediaExtractor;
 
-    status_t err = mExtractor->setDataSource(mPath.c_str());
+    status_t err = mExtractor->setDataSource(
+            NULL /* httpService */, mPath.c_str());
 
     if (err != OK) {
         mExtractor.clear();
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index 14b4306..587077a 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -24,7 +24,7 @@
     }
 }
 
-status_t SineSource::start(MetaData *params) {
+status_t SineSource::start(MetaData * /* params */) {
     CHECK(!mStarted);
 
     mGroup = new MediaBufferGroup;
@@ -58,7 +58,7 @@
 }
 
 status_t SineSource::read(
-        MediaBuffer **out, const ReadOptions *options) {
+        MediaBuffer **out, const ReadOptions * /* options */) {
     *out = NULL;
 
     MediaBuffer *buffer;
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index d125ad1..fd02bcc 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -24,6 +24,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
 #include <media/ICrypto.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -76,7 +77,7 @@
     static int64_t kTimeout = 500ll;
 
     sp<NuMediaExtractor> extractor = new NuMediaExtractor;
-    if (extractor->setDataSource(path) != OK) {
+    if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor.\n");
         return 1;
     }
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 90daea2..f4a33e8 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -20,6 +20,7 @@
 #include <utils/Log.h>
 
 #include <binder/ProcessState.h>
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
@@ -59,7 +60,7 @@
         int trimEndTimeMs,
         int rotationDegrees) {
     sp<NuMediaExtractor> extractor = new NuMediaExtractor;
-    if (extractor->setDataSource(path) != OK) {
+    if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor. %s\n", path);
         return 1;
     }
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index b7a40c2..fdc352e 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -296,7 +296,7 @@
 }
 #else
 
-int main(int argc, char **argv) {
+int main(int /* argc */, char ** /* argv */) {
     android::ProcessState::self()->startThreadPool();
 
     OMXClient client;
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
index b2b9ce5..3c0c7ec 100644
--- a/cmds/stagefright/sf2.cpp
+++ b/cmds/stagefright/sf2.cpp
@@ -19,8 +19,12 @@
 #include <inttypes.h>
 #include <utils/Log.h>
 
+#include <signal.h>
+
 #include <binder/ProcessState.h>
 
+#include <media/IMediaHTTPService.h>
+
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -43,6 +47,18 @@
 
 using namespace android;
 
+volatile static bool ctrlc = false;
+
+static sighandler_t oldhandler = NULL;
+
+static void mysighandler(int signum) {
+    if (signum == SIGINT) {
+        ctrlc = true;
+        return;
+    }
+    oldhandler(signum);
+}
+
 struct Controller : public AHandler {
     Controller(const char *uri, bool decodeAudio,
                const sp<Surface> &surface, bool renderToSurface)
@@ -63,7 +79,30 @@
     virtual ~Controller() {
     }
 
+    virtual void printStatistics() {
+        int64_t delayUs = ALooper::GetNowUs() - mStartTimeUs;
+
+        if (mDecodeAudio) {
+            printf("%" PRId64 " bytes received. %.2f KB/sec\n",
+            mTotalBytesReceived,
+            mTotalBytesReceived * 1E6 / 1024 / delayUs);
+        } else {
+            printf("%d frames decoded, %.2f fps. %" PRId64 " bytes "
+                    "received. %.2f KB/sec\n",
+            mNumOutputBuffersReceived,
+            mNumOutputBuffersReceived * 1E6 / delayUs,
+            mTotalBytesReceived,
+            mTotalBytesReceived * 1E6 / 1024 / delayUs);
+        }
+    }
+
     virtual void onMessageReceived(const sp<AMessage> &msg) {
+        if (ctrlc) {
+            printf("\n");
+            printStatistics();
+            (new AMessage(kWhatStop, id()))->post();
+            ctrlc = false;
+        }
         switch (msg->what()) {
             case kWhatStart:
             {
@@ -76,7 +115,8 @@
 #endif
 
                 sp<DataSource> dataSource =
-                    DataSource::CreateFromURI(mURI.c_str());
+                    DataSource::CreateFromURI(
+                            NULL /* httpService */, mURI.c_str());
 
                 sp<MediaExtractor> extractor =
                     MediaExtractor::Create(dataSource);
@@ -99,7 +139,10 @@
                         break;
                     }
                 }
-                CHECK(mSource != NULL);
+                if (mSource == NULL) {
+                    printf("no %s track found\n", mDecodeAudio ? "audio" : "video");
+                    exit (1);
+                }
 
                 CHECK_EQ(mSource->start(), (status_t)OK);
 
@@ -181,21 +224,7 @@
                         || what == ACodec::kWhatError) {
                     printf((what == ACodec::kWhatEOS) ? "$\n" : "E\n");
 
-                    int64_t delayUs = ALooper::GetNowUs() - mStartTimeUs;
-
-                    if (mDecodeAudio) {
-                        printf("%" PRId64 " bytes received. %.2f KB/sec\n",
-                               mTotalBytesReceived,
-                               mTotalBytesReceived * 1E6 / 1024 / delayUs);
-                    } else {
-                        printf("%d frames decoded, %.2f fps. %" PRId64 " bytes "
-                               "received. %.2f KB/sec\n",
-                               mNumOutputBuffersReceived,
-                               mNumOutputBuffersReceived * 1E6 / delayUs,
-                               mTotalBytesReceived,
-                               mTotalBytesReceived * 1E6 / 1024 / delayUs);
-                    }
-
+                    printStatistics();
                     (new AMessage(kWhatStop, id()))->post();
                 } else if (what == ACodec::kWhatFlushCompleted) {
                     mSeekState = SEEK_FLUSH_COMPLETED;
@@ -639,6 +668,8 @@
 
     looper->registerHandler(controller);
 
+    signal(SIGINT, mysighandler);
+
     controller->startAsync();
 
     CHECK_EQ(looper->start(true /* runOnCallingThread */), (status_t)OK);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index ab2c54b..b70afe6 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -29,6 +29,7 @@
 
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include "include/NuCachedSource2.h"
@@ -938,9 +939,11 @@
         } else {
             CHECK(useSurfaceTexAlloc);
 
-            sp<BufferQueue> bq = new BufferQueue();
-            sp<GLConsumer> texture = new GLConsumer(bq, 0 /* tex */);
-            gSurface = new Surface(bq);
+            sp<IGraphicBufferProducer> producer;
+            sp<IGraphicBufferConsumer> consumer;
+            BufferQueue::createBufferQueue(&producer, &consumer);
+            sp<GLConsumer> texture = new GLConsumer(consumer, 0 /* tex */);
+            gSurface = new Surface(producer);
         }
 
         CHECK_EQ((status_t)OK,
@@ -958,7 +961,8 @@
 
         const char *filename = argv[k];
 
-        sp<DataSource> dataSource = DataSource::CreateFromURI(filename);
+        sp<DataSource> dataSource =
+            DataSource::CreateFromURI(NULL /* httpService */, filename);
 
         if (strncasecmp(filename, "sine:", 5) && dataSource == NULL) {
             fprintf(stderr, "Unable to create data source.\n");
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index dba67a9..0566d14 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -21,6 +21,7 @@
 #include <binder/ProcessState.h>
 #include <cutils/properties.h> // for property_get
 
+#include <media/IMediaHTTPService.h>
 #include <media/IStreamSource.h>
 #include <media/mediaplayer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -159,7 +160,9 @@
 MyConvertingStreamSource::MyConvertingStreamSource(const char *filename)
     : mCurrentBufferIndex(-1),
       mCurrentBufferOffset(0) {
-    sp<DataSource> dataSource = DataSource::CreateFromURI(filename);
+    sp<DataSource> dataSource =
+        DataSource::CreateFromURI(NULL /* httpService */, filename);
+
     CHECK(dataSource != NULL);
 
     sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
@@ -371,7 +374,7 @@
     }
 
     sp<IMediaPlayer> player =
-        service->create(client, 0);
+        service->create(client, AUDIO_SESSION_ALLOCATE);
 
     if (player != NULL && player->setDataSource(source) == NO_ERROR) {
         player->setVideoSurfaceTexture(surface->getIGraphicBufferProducer());
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
index 234aef2..f400732 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
@@ -316,6 +316,7 @@
 
     if (-1 < fileDesc) {
         if (FwdLockFile_attach(fileDesc) < 0) {
+            close(fileDesc);
             return mimeString;
         }
         const char* pMimeType = FwdLockFile_GetContentType(fileDesc);
diff --git a/include/camera/CameraParameters2.h b/include/camera/CameraParameters2.h
deleted file mode 100644
index 88ad812..0000000
--- a/include/camera/CameraParameters2.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS2_H
-#define ANDROID_HARDWARE_CAMERA_PARAMETERS2_H
-
-#include <utils/Vector.h>
-#include <utils/String8.h>
-#include "CameraParameters.h"
-
-namespace android {
-
-/**
- * A copy of CameraParameters plus ABI-breaking changes. Needed
- * because some camera HALs directly link to CameraParameters and cannot
- * tolerate an ABI change.
- */
-class CameraParameters2
-{
-public:
-    CameraParameters2();
-    CameraParameters2(const String8 &params) { unflatten(params); }
-    ~CameraParameters2();
-
-    String8 flatten() const;
-    void unflatten(const String8 &params);
-
-    void set(const char *key, const char *value);
-    void set(const char *key, int value);
-    void setFloat(const char *key, float value);
-    // Look up string value by key.
-    // -- The string remains valid until the next set/remove of the same key,
-    //    or until the map gets cleared.
-    const char *get(const char *key) const;
-    int getInt(const char *key) const;
-    float getFloat(const char *key) const;
-
-    // Compare the order that key1 was set vs the order that key2 was set.
-    //
-    // Sets the order parameter to an integer less than, equal to, or greater
-    // than zero if key1's set order was respectively, to be less than, to
-    // match, or to be greater than key2's set order.
-    //
-    // Error codes:
-    //  * NAME_NOT_FOUND - if either key has not been set previously
-    //  * BAD_VALUE - if any of the parameters are NULL
-    status_t compareSetOrder(const char *key1, const char *key2,
-            /*out*/
-            int *order) const;
-
-    void remove(const char *key);
-
-    void setPreviewSize(int width, int height);
-    void getPreviewSize(int *width, int *height) const;
-    void getSupportedPreviewSizes(Vector<Size> &sizes) const;
-
-    // Set the dimensions in pixels to the given width and height
-    // for video frames. The given width and height must be one
-    // of the supported dimensions returned from
-    // getSupportedVideoSizes(). Must not be called if
-    // getSupportedVideoSizes() returns an empty Vector of Size.
-    void setVideoSize(int width, int height);
-    // Retrieve the current dimensions (width and height)
-    // in pixels for video frames, which must be one of the
-    // supported dimensions returned from getSupportedVideoSizes().
-    // Must not be called if getSupportedVideoSizes() returns an
-    // empty Vector of Size.
-    void getVideoSize(int *width, int *height) const;
-    // Retrieve a Vector of supported dimensions (width and height)
-    // in pixels for video frames. If sizes returned from the method
-    // is empty, the camera does not support calls to setVideoSize()
-    // or getVideoSize(). In adddition, it also indicates that
-    // the camera only has a single output, and does not have
-    // separate output for video frames and preview frame.
-    void getSupportedVideoSizes(Vector<Size> &sizes) const;
-    // Retrieve the preferred preview size (width and height) in pixels
-    // for video recording. The given width and height must be one of
-    // supported preview sizes returned from getSupportedPreviewSizes().
-    // Must not be called if getSupportedVideoSizes() returns an empty
-    // Vector of Size. If getSupportedVideoSizes() returns an empty
-    // Vector of Size, the width and height returned from this method
-    // is invalid, and is "-1x-1".
-    void getPreferredPreviewSizeForVideo(int *width, int *height) const;
-
-    void setPreviewFrameRate(int fps);
-    int getPreviewFrameRate() const;
-    void getPreviewFpsRange(int *min_fps, int *max_fps) const;
-    void setPreviewFpsRange(int min_fps, int max_fps);
-    void setPreviewFormat(const char *format);
-    const char *getPreviewFormat() const;
-    void setPictureSize(int width, int height);
-    void getPictureSize(int *width, int *height) const;
-    void getSupportedPictureSizes(Vector<Size> &sizes) const;
-    void setPictureFormat(const char *format);
-    const char *getPictureFormat() const;
-
-    void dump() const;
-    status_t dump(int fd, const Vector<String16>& args) const;
-
-private:
-
-    // Quick and dirty map that maintains insertion order
-    template <typename KeyT, typename ValueT>
-    struct OrderedKeyedVector {
-
-        ssize_t add(const KeyT& key, const ValueT& value) {
-            return mList.add(Pair(key, value));
-        }
-
-        size_t size() const {
-            return mList.size();
-        }
-
-        const KeyT& keyAt(size_t idx) const {
-            return mList[idx].mKey;
-        }
-
-        const ValueT& valueAt(size_t idx) const {
-            return mList[idx].mValue;
-        }
-
-        const ValueT& valueFor(const KeyT& key) const {
-            ssize_t i = indexOfKey(key);
-            LOG_ALWAYS_FATAL_IF(i<0, "%s: key not found", __PRETTY_FUNCTION__);
-
-            return valueAt(i);
-        }
-
-        ssize_t indexOfKey(const KeyT& key) const {
-                size_t vectorIdx = 0;
-                for (; vectorIdx < mList.size(); ++vectorIdx) {
-                    if (mList[vectorIdx].mKey == key) {
-                        return (ssize_t) vectorIdx;
-                    }
-                }
-
-                return NAME_NOT_FOUND;
-        }
-
-        ssize_t removeItem(const KeyT& key) {
-            size_t vectorIdx = (size_t) indexOfKey(key);
-
-            if (vectorIdx < 0) {
-                return vectorIdx;
-            }
-
-            return mList.removeAt(vectorIdx);
-        }
-
-        void clear() {
-            mList.clear();
-        }
-
-        // Same as removing and re-adding. The key's index changes to max.
-        ssize_t replaceValueFor(const KeyT& key, const ValueT& value) {
-            removeItem(key);
-            return add(key, value);
-        }
-
-    private:
-
-        struct Pair {
-            Pair() : mKey(), mValue() {}
-            Pair(const KeyT& key, const ValueT& value) :
-                    mKey(key),
-                    mValue(value) {}
-            KeyT   mKey;
-            ValueT mValue;
-        };
-
-        Vector<Pair> mList;
-    };
-
-    /**
-     * Order matters: Keys that are set() later are stored later in the map.
-     *
-     * If two keys have meaning that conflict, then the later-set key
-     * wins.
-     *
-     * For example, preview FPS and preview FPS range conflict since only
-     * we only want to use the FPS range if that's the last thing that was set.
-     * So in that case, only use preview FPS range if it was set later than
-     * the preview FPS.
-     */
-    OrderedKeyedVector<String8,String8>    mMap;
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
new file mode 100644
index 0000000..6e47a16
--- /dev/null
+++ b/include/camera/CaptureResult.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAPTURERESULT_H
+#define ANDROID_HARDWARE_CAPTURERESULT_H
+
+#include <utils/RefBase.h>
+#include <camera/CameraMetadata.h>
+
+namespace android {
+
+/**
+ * CaptureResultExtras is a structure to encapsulate various indices for a capture result.
+ * These indices are framework-internal and not sent to the HAL.
+ */
+struct CaptureResultExtras {
+    /**
+     * An integer to index the request sequence that this result belongs to.
+     */
+    int32_t requestId;
+
+    /**
+     * An integer to index this result inside a request sequence, starting from 0.
+     */
+    int32_t burstId;
+
+    /**
+     * TODO: Add documentation for this field.
+     */
+    int32_t afTriggerId;
+
+    /**
+     * TODO: Add documentation for this field.
+     */
+    int32_t precaptureTriggerId;
+
+    /**
+     * A 64bit integer to index the frame number associated with this result.
+     */
+    int64_t frameNumber;
+
+    /**
+     * Constructor initializes object as invalid by setting requestId to be -1.
+     */
+    CaptureResultExtras()
+        : requestId(-1),
+          burstId(0),
+          afTriggerId(0),
+          precaptureTriggerId(0),
+          frameNumber(0) {
+    }
+
+    /**
+     * This function returns true if it's a valid CaptureResultExtras object.
+     * Otherwise, returns false. It is valid only when requestId is non-negative.
+     */
+    bool isValid();
+
+    status_t                readFromParcel(Parcel* parcel);
+    status_t                writeToParcel(Parcel* parcel) const;
+};
+
+struct CaptureResult : public virtual LightRefBase<CaptureResult> {
+    CameraMetadata          mMetadata;
+    CaptureResultExtras     mResultExtras;
+
+    CaptureResult();
+
+    CaptureResult(const CaptureResult& otherResult);
+
+    status_t                readFromParcel(Parcel* parcel);
+    status_t                writeToParcel(Parcel* parcel) const;
+};
+
+}
+
+#endif /* ANDROID_HARDWARE_CAPTURERESULT_H */
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index f342122..6e48f22 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -31,6 +31,7 @@
 class ICameraDeviceUser;
 class ICameraDeviceCallbacks;
 class CameraMetadata;
+class VendorTagDescriptor;
 
 class ICameraService : public IInterface
 {
@@ -47,6 +48,7 @@
         ADD_LISTENER,
         REMOVE_LISTENER,
         GET_CAMERA_CHARACTERISTICS,
+        GET_CAMERA_VENDOR_TAG_DESCRIPTOR,
     };
 
     enum {
@@ -58,10 +60,16 @@
 
     virtual int32_t  getNumberOfCameras() = 0;
     virtual status_t getCameraInfo(int cameraId,
-                                          struct CameraInfo* cameraInfo) = 0;
+            /*out*/
+            struct CameraInfo* cameraInfo) = 0;
 
     virtual status_t getCameraCharacteristics(int cameraId,
-                                              CameraMetadata* cameraInfo) = 0;
+            /*out*/
+            CameraMetadata* cameraInfo) = 0;
+
+    virtual status_t getCameraVendorTagDescriptor(
+            /*out*/
+            sp<VendorTagDescriptor>& desc) = 0;
 
     // Returns 'OK' if operation succeeded
     // - Errors: ALREADY_EXISTS if the listener was already added
diff --git a/include/camera/VendorTagDescriptor.h b/include/camera/VendorTagDescriptor.h
new file mode 100644
index 0000000..ea21d31
--- /dev/null
+++ b/include/camera/VendorTagDescriptor.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VENDOR_TAG_DESCRIPTOR_H
+#define VENDOR_TAG_DESCRIPTOR_H
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/RefBase.h>
+#include <system/camera_vendor_tags.h>
+
+#include <stdint.h>
+
+namespace android {
+
+class Parcel;
+
+/**
+ * VendorTagDescriptor objects are parcelable containers for the vendor tag
+ * definitions provided, and are typically used to pass the vendor tag
+ * information enumerated by the HAL to clients of the camera service.
+ */
+class VendorTagDescriptor
+        : public LightRefBase<VendorTagDescriptor> {
+    public:
+        virtual ~VendorTagDescriptor();
+
+        /**
+         * The following 'get*' methods implement the corresponding
+         * functions defined in
+         * system/media/camera/include/system/camera_vendor_tags.h
+         */
+
+        // Returns the number of vendor tags defined.
+        int getTagCount() const;
+
+        // Returns an array containing the id's of vendor tags defined.
+        void getTagArray(uint32_t* tagArray) const;
+
+        // Returns the section name string for a given vendor tag id.
+        const char* getSectionName(uint32_t tag) const;
+
+        // Returns the tag name string for a given vendor tag id.
+        const char* getTagName(uint32_t tag) const;
+
+        // Returns the tag type for a given vendor tag id.
+        int getTagType(uint32_t tag) const;
+
+        /**
+         * Write the VendorTagDescriptor object into the given parcel.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        status_t writeToParcel(
+                /*out*/
+                Parcel* parcel) const;
+
+        // Static methods:
+
+        /**
+         * Create a VendorTagDescriptor object from the given parcel.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        static status_t createFromParcel(const Parcel* parcel,
+                /*out*/
+                sp<VendorTagDescriptor>& descriptor);
+
+        /**
+         * Create a VendorTagDescriptor object from the given vendor_tag_ops_t
+         * struct.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+                /*out*/
+                sp<VendorTagDescriptor>& descriptor);
+
+        /**
+         * Sets the global vendor tag descriptor to use for this process.
+         * Camera metadata operations that access vendor tags will use the
+         * vendor tag definitions set this way.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
+
+        /**
+         * Clears the global vendor tag descriptor used by this process.
+         */
+        static void clearGlobalVendorTagDescriptor();
+
+        /**
+         * Returns the global vendor tag descriptor used by this process.
+         * This will contain NULL if no vendor tags are defined.
+         */
+        static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
+    protected:
+        VendorTagDescriptor();
+        KeyedVector<uint32_t, String8> mTagToNameMap;
+        KeyedVector<uint32_t, String8> mTagToSectionMap;
+        KeyedVector<uint32_t, int32_t> mTagToTypeMap;
+        // must be int32_t to be compatible with Parcel::writeInt32
+        int32_t mTagCount;
+    private:
+        vendor_tag_ops mVendorOps;
+};
+
+} /* namespace android */
+
+#define VENDOR_TAG_DESCRIPTOR_H
+#endif /* VENDOR_TAG_DESCRIPTOR_H */
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
index 8dac4f2..f059b3d 100644
--- a/include/camera/camera2/ICameraDeviceCallbacks.h
+++ b/include/camera/camera2/ICameraDeviceCallbacks.h
@@ -24,9 +24,12 @@
 #include <utils/Timers.h>
 #include <system/camera.h>
 
+#include <camera/CaptureResult.h>
+
 namespace android {
 class CameraMetadata;
 
+
 class ICameraDeviceCallbacks : public IInterface
 {
     /**
@@ -45,18 +48,19 @@
     };
 
     // One way
-    virtual void            onDeviceError(CameraErrorCode errorCode) = 0;
+    virtual void            onDeviceError(CameraErrorCode errorCode,
+                                          const CaptureResultExtras& resultExtras) = 0;
 
     // One way
     virtual void            onDeviceIdle() = 0;
 
     // One way
-    virtual void            onCaptureStarted(int32_t requestId,
+    virtual void            onCaptureStarted(const CaptureResultExtras& resultExtras,
                                              int64_t timestamp) = 0;
 
     // One way
-    virtual void            onResultReceived(int32_t requestId,
-                                             const CameraMetadata& result) = 0;
+    virtual void            onResultReceived(const CameraMetadata& metadata,
+                                             const CaptureResultExtras& resultExtras) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index f71f302..913696f 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -19,6 +19,7 @@
 
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
+#include <utils/List.h>
 
 struct camera_metadata;
 
@@ -30,6 +31,10 @@
 class CaptureRequest;
 class CameraMetadata;
 
+enum {
+    NO_IN_FLIGHT_REPEATING_FRAMES = -1,
+};
+
 class ICameraDeviceUser : public IInterface
 {
     /**
@@ -44,9 +49,34 @@
      * Request Handling
      **/
 
+    /**
+     * For streaming requests, output lastFrameNumber is the last frame number
+     * of the previous repeating request.
+     * For non-streaming requests, output lastFrameNumber is the expected last
+     * frame number of the current request.
+     */
     virtual int             submitRequest(sp<CaptureRequest> request,
-                                          bool streaming = false) = 0;
-    virtual status_t        cancelRequest(int requestId) = 0;
+                                          bool streaming = false,
+                                          /*out*/
+                                          int64_t* lastFrameNumber = NULL) = 0;
+
+    /**
+     * For streaming requests, output lastFrameNumber is the last frame number
+     * of the previous repeating request.
+     * For non-streaming requests, output lastFrameNumber is the expected last
+     * frame number of the current request.
+     */
+    virtual int             submitRequestList(List<sp<CaptureRequest> > requestList,
+                                              bool streaming = false,
+                                              /*out*/
+                                              int64_t* lastFrameNumber = NULL) = 0;
+
+    /**
+     * Output lastFrameNumber is the last frame number of the previous repeating request.
+     */
+    virtual status_t        cancelRequest(int requestId,
+                                          /*out*/
+                                          int64_t* lastFrameNumber = NULL) = 0;
 
     virtual status_t        deleteStream(int streamId) = 0;
     virtual status_t        createStream(
@@ -64,8 +94,12 @@
     // Wait until all the submitted requests have finished processing
     virtual status_t        waitUntilIdle() =  0;
 
-    // Flush all pending and in-progress work as quickly as possible.
-    virtual status_t        flush() = 0;
+    /**
+     * Flush all pending and in-progress work as quickly as possible.
+     * Output lastFrameNumber is the last frame number of the previous repeating request.
+     */
+    virtual status_t        flush(/*out*/
+                                  int64_t* lastFrameNumber = NULL) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index ef392f0..7be449c 100644
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -61,6 +61,17 @@
     //  buffer->frameCount  0
     virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
 
+    // Release (a portion of) the buffer previously obtained by getNextBuffer().
+    // It is permissible to call releaseBuffer() multiple times per getNextBuffer().
+    // On entry:
+    //  buffer->frameCount  number of frames to release, must be <= number of frames
+    //                      obtained but not yet released
+    //  buffer->raw         unused
+    // On return:
+    //  buffer->frameCount  0; implementation MUST set to zero
+    //  buffer->raw         undefined; implementation is PERMITTED to set to any value,
+    //                      so if caller needs to continue using this buffer it must
+    //                      keep track of the pointer itself
     virtual void releaseBuffer(Buffer* buffer) = 0;
 };
 
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 05d834d..f98002d 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -36,7 +36,7 @@
 
 // ----------------------------------------------------------------------------
 
-class effect_param_cblk_t;
+struct effect_param_cblk_t;
 
 // ----------------------------------------------------------------------------
 
@@ -217,8 +217,9 @@
      *      higher priorities, 0 being the normal priority.
      * cbf:         optional callback function (see effect_callback_t)
      * user:        pointer to context for use by the callback receiver.
-     * sessionID:   audio session this effect is associated to. If 0, the effect will be global to
-     *      the output mix. If not 0, the effect will be applied to all players
+     * sessionID:   audio session this effect is associated to.
+     *      If equal to AUDIO_SESSION_OUTPUT_MIX, the effect will be global to
+     *      the output mix.  Otherwise, the effect will be applied to all players
      *      (AudioTrack or MediaPLayer) within the same audio session.
      * io:  HAL audio output or input stream to which this effect must be attached. Leave at 0 for
      *      automatic output selection by AudioFlinger.
@@ -229,8 +230,8 @@
                   int32_t priority = 0,
                   effect_callback_t cbf = NULL,
                   void* user = NULL,
-                  int sessionId = 0,
-                  audio_io_handle_t io = 0
+                  int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                  audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                   );
 
     /* Constructor.
@@ -241,8 +242,8 @@
                     int32_t priority = 0,
                     effect_callback_t cbf = NULL,
                     void* user = NULL,
-                    int sessionId = 0,
-                    audio_io_handle_t io = 0
+                    int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                    audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                     );
 
     /* Terminates the AudioEffect and unregisters it from AudioFlinger.
@@ -263,8 +264,8 @@
                             int32_t priority = 0,
                             effect_callback_t cbf = NULL,
                             void* user = NULL,
-                            int sessionId = 0,
-                            audio_io_handle_t io = 0
+                            int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                            audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                             );
 
     /* Result of constructing the AudioEffect. This must be checked
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 052064d..b3c44a8 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -26,7 +26,7 @@
 
 // ----------------------------------------------------------------------------
 
-class audio_track_cblk_t;
+struct audio_track_cblk_t;
 class AudioRecordClientProxy;
 
 // ----------------------------------------------------------------------------
@@ -39,8 +39,12 @@
      * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
      */
     enum event_type {
-        EVENT_MORE_DATA = 0,        // Request to read more data from PCM buffer.
-        EVENT_OVERRUN = 1,          // PCM buffer overrun occurred.
+        EVENT_MORE_DATA = 0,        // Request to read available data from buffer.
+                                    // If this event is delivered but the callback handler
+                                    // does not want to read the available data, the handler
+                                    // must explicitly ignore the event by setting
+                                    // frameCount to zero.
+        EVENT_OVERRUN = 1,          // Buffer overrun occurred.
         EVENT_MARKER = 2,           // Record head is at the specified marker position
                                     // (See setMarkerPosition()).
         EVENT_NEW_POS = 3,          // Record head is at a new position
@@ -60,9 +64,10 @@
         size_t      frameCount;     // number of sample frames corresponding to size;
                                     // on input it is the number of frames available,
                                     // on output is the number of frames actually drained
-                                    // (currently ignored, but will make the primary field in future)
+                                    // (currently ignored but will make the primary field in future)
 
         size_t      size;           // input/output in bytes == frameCount * frameSize
+                                    // on output is the number of bytes actually drained
                                     // FIXME this is redundant with respect to frameCount,
                                     // and TRANSFER_OBTAIN mode is broken for 8-bit data
                                     // since we don't define the frame format
@@ -76,7 +81,7 @@
 
     /* As a convenience, if a callback is supplied, a handler thread
      * is automatically created with the appropriate priority. This thread
-     * invokes the callback when a new buffer becomes ready or various conditions occur.
+     * invokes the callback when a new buffer becomes available or various conditions occur.
      * Parameters:
      *
      * event:   type of event notified (see enum AudioRecord::event_type).
@@ -99,6 +104,8 @@
      *  - NO_ERROR: successful operation
      *  - NO_INIT: audio server or audio hardware not initialized
      *  - BAD_VALUE: unsupported configuration
+     * frameCount is guaranteed to be non-zero if status is NO_ERROR,
+     * and is undefined otherwise.
      */
 
      static status_t getMinFrameCount(size_t* frameCount,
@@ -109,7 +116,7 @@
     /* How data is transferred from AudioRecord
      */
     enum transfer_type {
-        TRANSFER_DEFAULT,   // not specified explicitly; determine from other parameters
+        TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
         TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
         TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
         TRANSFER_SYNC,      // synchronous read()
@@ -137,7 +144,7 @@
      *                     be larger if the requested size is not compatible with current audio HAL
      *                     latency.  Zero means to use a default value.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to consume new PCM data and inform of marker, position updates, etc.
+     *                     to consume new data and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames are ready in record track output buffer.
@@ -151,11 +158,11 @@
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    int frameCount      = 0,
+                                    size_t frameCount = 0,
                                     callback_t cbf = NULL,
                                     void* user = NULL,
-                                    int notificationFrames = 0,
-                                    int sessionId = 0,
+                                    uint32_t notificationFrames = 0,
+                                    int sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE);
 
@@ -171,9 +178,10 @@
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful intialization
      *  - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
-     *  - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
+     *  - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
      *  - NO_INIT: audio server or audio hardware not initialized
      *  - PERMISSION_DENIED: recording is not allowed for the requesting process
+     * If status is not equal to NO_ERROR, don't call any other APIs on this AudioRecord.
      *
      * Parameters not listed in the AudioRecord constructors above:
      *
@@ -183,16 +191,16 @@
                             uint32_t sampleRate,
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
-                            int frameCount      = 0,
+                            size_t frameCount = 0,
                             callback_t cbf = NULL,
                             void* user = NULL,
-                            int notificationFrames = 0,
+                            uint32_t notificationFrames = 0,
                             bool threadCanCallJava = false,
-                            int sessionId = 0,
+                            int sessionId = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE);
 
-    /* Result of constructing the AudioRecord. This must be checked
+    /* Result of constructing the AudioRecord. This must be checked for successful initialization
      * before using any AudioRecord API (except for set()), because using
      * an uninitialized AudioRecord produces undefined results.
      * See set() method above for possible return codes.
@@ -221,7 +229,7 @@
             status_t    start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
                               int triggerSession = 0);
 
-    /* Stop a track. If set, the callback will cease being called.  Note that obtainBuffer() still
+    /* Stop a track.  The callback will cease being called.  Note that obtainBuffer() still
      * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
      */
             void        stop();
@@ -236,7 +244,7 @@
      * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
      * with marker == 0 cancels marker notification callback.
      * To set a marker at a position which would compute as 0,
-     * a workaround is to the set the marker at a nearby position such as ~0 or 1.
+     * a workaround is to set the marker at a nearby position such as ~0 or 1.
      * If the AudioRecord has been opened with no callback function associated,
      * the operation will fail.
      *
@@ -378,8 +386,10 @@
      * returning the current value by this function call.  Such loss typically occurs when the
      * user space process is blocked longer than the capacity of audio driver buffers.
      * Units: the number of input audio frames.
+     * FIXME The side-effect of resetting the counter may be incompatible with multi-client.
+     * Consider making it more like AudioTrack::getUnderrunFrames which doesn't have side effects.
      */
-            unsigned int  getInputFramesLost() const;
+            uint32_t    getInputFramesLost() const;
 
 private:
     /* copying audio record objects is not allowed */
@@ -412,6 +422,7 @@
         bool                mPaused;    // whether thread is requested to pause at next loop entry
         bool                mPausedInt; // whether thread internally requests pause
         nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
+        bool                mIgnoreNextPausedInt;   // whether to ignore next mPausedInt request
     };
 
             // body of AudioRecordThread::threadLoop()
@@ -422,9 +433,10 @@
             //      NS_INACTIVE inactive so don't run again until re-started
             //      NS_NEVER    never again
             static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
-            nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread);
+            nsecs_t processAudioBuffer();
 
             // caller must hold lock on mLock for all _l methods
+
             status_t openRecord_l(size_t epoch);
 
             // FIXME enum is faster than strcmp() for parameter 'from'
@@ -446,12 +458,13 @@
                                                     // notification callback
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback
-    bool                    mRefreshRemaining;  // processAudioBuffer() should refresh next 2
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
+                                                    // mRemainingFrames and mRetryOnPartialBuffer
 
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
-    int                     mObservedSequence;      // last observed value of mSequence
+    uint32_t                mObservedSequence;      // last observed value of mSequence
 
     uint32_t                mMarkerPosition;    // in wrapping (overflow) frame units
     bool                    mMarkerReached;
@@ -460,9 +473,13 @@
 
     status_t                mStatus;
 
+    size_t                  mFrameCount;            // corresponds to current IAudioRecord, value is
+                                                    // reported back by AudioFlinger to the client
+    size_t                  mReqFrameCount;         // frame count to request the first or next time
+                                                    // a new IAudioRecord is needed, non-decreasing
+
     // constant after constructor or set()
     uint32_t                mSampleRate;
-    size_t                  mFrameCount;
     audio_format_t          mFormat;
     uint32_t                mChannelCount;
     size_t                  mFrameSize;         // app-level frame size == AudioFlinger frame size
@@ -473,12 +490,11 @@
     int                     mSessionId;
     transfer_type           mTransfer;
 
-    audio_io_handle_t       mInput;             // returned by AudioSystem::getInput()
-
-    // may be changed if IAudioRecord object is re-created
+    // Next 4 fields may be changed if IAudioRecord is re-created, but always != 0
     sp<IAudioRecord>        mAudioRecord;
     sp<IMemory>             mCblkMemory;
     audio_track_cblk_t*     mCblk;              // re-load after mLock.unlock()
+    audio_io_handle_t       mInput;             // returned by AudioSystem::getInput()
 
     int                     mPreviousPriority;  // before start()
     SchedPolicy             mPreviousSchedulingGroup;
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 4c22412..402b479 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -67,20 +67,24 @@
 
     // returns true in *state if tracks are active on the specified stream or have been active
     // in the past inPastMs milliseconds
-    static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs = 0);
+    static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs);
     // returns true in *state if tracks are active for what qualifies as remote playback
     // on the specified stream or have been active in the past inPastMs milliseconds. Remote
     // playback isn't mutually exclusive with local playback.
     static status_t isStreamActiveRemotely(audio_stream_type_t stream, bool *state,
-            uint32_t inPastMs = 0);
+            uint32_t inPastMs);
     // returns true in *state if a recorder is currently recording with the specified source
     static status_t isSourceActive(audio_source_t source, bool *state);
 
     // set/get audio hardware parameters. The function accepts a list of parameters
     // key value pairs in the form: key1=value1;key2=value2;...
     // Some keys are reserved for standard parameters (See AudioParameter class).
+    // The versions with audio_io_handle_t are intended for internal media framework use only.
     static status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
     static String8  getParameters(audio_io_handle_t ioHandle, const String8& keys);
+    // The versions without audio_io_handle_t are intended for JNI.
+    static status_t setParameters(const String8& keyValuePairs);
+    static String8  getParameters(const String8& keys);
 
     static void setErrorCallback(audio_error_callback cb);
 
@@ -90,12 +94,14 @@
     static float linearToLog(int volume);
     static int logToLinear(float volume);
 
+    // Returned samplingRate and frameCount output values are guaranteed
+    // to be non-zero if status == NO_ERROR
     static status_t getOutputSamplingRate(uint32_t* samplingRate,
-            audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+            audio_stream_type_t stream);
     static status_t getOutputFrameCount(size_t* frameCount,
-            audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+            audio_stream_type_t stream);
     static status_t getOutputLatency(uint32_t* latency,
-            audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+            audio_stream_type_t stream);
     static status_t getSamplingRate(audio_io_handle_t output,
                                           audio_stream_type_t streamType,
                                           uint32_t* samplingRate);
@@ -107,19 +113,18 @@
     // returns the audio output stream latency in ms. Corresponds to
     // audio_stream_out->get_latency()
     static status_t getLatency(audio_io_handle_t output,
-                               audio_stream_type_t stream,
                                uint32_t* latency);
 
     static bool routedToA2dpOutput(audio_stream_type_t streamType);
 
+    // return status NO_ERROR implies *buffSize > 0
     static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
         audio_channel_mask_t channelMask, size_t* buffSize);
 
     static status_t setVoiceVolume(float volume);
 
     // return the number of audio frames written by AudioFlinger to audio HAL and
-    // audio dsp to DAC since the output on which the specified stream is playing
-    // has exited standby.
+    // audio dsp to DAC since the specified output I/O handle has exited standby.
     // returned status (from utils/Errors.h) can be:
     // - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
     // - INVALID_OPERATION: Not supported on current hardware platform
@@ -128,15 +133,20 @@
     // necessary to check returned status before using the returned values.
     static status_t getRenderPosition(audio_io_handle_t output,
                                       uint32_t *halFrames,
-                                      uint32_t *dspFrames,
-                                      audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+                                      uint32_t *dspFrames);
 
     // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
-    static size_t getInputFramesLost(audio_io_handle_t ioHandle);
+    static uint32_t getInputFramesLost(audio_io_handle_t ioHandle);
 
+    // Allocate a new audio session ID and return that new ID.
+    // If unable to contact AudioFlinger, returns AUDIO_SESSION_ALLOCATE instead.
+    // FIXME If AudioFlinger were to ever exhaust the session ID namespace,
+    //       this method could fail by returning either AUDIO_SESSION_ALLOCATE
+    //       or an unspecified existing session ID.
     static int newAudioSessionId();
-    static void acquireAudioSessionId(int audioSession);
-    static void releaseAudioSessionId(int audioSession);
+
+    static void acquireAudioSessionId(int audioSession, pid_t pid);
+    static void releaseAudioSessionId(int audioSession, pid_t pid);
 
     // types of io configuration change events received with ioConfigChanged()
     enum io_config_event {
@@ -155,7 +165,8 @@
     class OutputDescriptor {
     public:
         OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)  {}
+        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)
+            {}
 
         uint32_t samplingRate;
         audio_format_t format;
@@ -193,24 +204,32 @@
     static status_t setPhoneState(audio_mode_t state);
     static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+
+    // Client must successfully hand off the handle reference to AudioFlinger via createTrack(),
+    // or release it with releaseOutput().
     static audio_io_handle_t getOutput(audio_stream_type_t stream,
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
                                         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL);
+
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
-                                int session = 0);
+                                int session);
     static status_t stopOutput(audio_io_handle_t output,
                                audio_stream_type_t stream,
-                               int session = 0);
+                               int session);
     static void releaseOutput(audio_io_handle_t output);
+
+    // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
+    // or release it with releaseInput().
     static audio_io_handle_t getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
-                                    int sessionId = 0);
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int sessionId);
+
     static status_t startInput(audio_io_handle_t input);
     static status_t stopInput(audio_io_handle_t input);
     static void releaseInput(audio_io_handle_t input);
@@ -302,8 +321,6 @@
 
     static sp<IAudioPolicyService> gAudioPolicyService;
 
-    // mapping between stream types and outputs
-    static DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> gStreamOutputMap;
     // list of output descriptors containing cached parameters
     // (sampling rate, framecount, channel count...)
     static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
index c29c7e5..99e9c3e 100644
--- a/include/media/AudioTimestamp.h
+++ b/include/media/AudioTimestamp.h
@@ -19,6 +19,8 @@
 
 #include <time.h>
 
+namespace android {
+
 class AudioTimestamp {
 public:
     AudioTimestamp() : mPosition(0) {
@@ -30,4 +32,6 @@
     struct timespec mTime;     // corresponding CLOCK_MONOTONIC when frame is expected to present
 };
 
+}   // namespace android
+
 #endif  // ANDROID_AUDIO_TIMESTAMP_H
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 4736369..2c48bbf 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -27,7 +27,7 @@
 
 // ----------------------------------------------------------------------------
 
-class audio_track_cblk_t;
+struct audio_track_cblk_t;
 class AudioTrackClientProxy;
 class StaticAudioTrackClientProxy;
 
@@ -36,11 +36,6 @@
 class AudioTrack : public RefBase
 {
 public:
-    enum channel_index {
-        MONO   = 0,
-        LEFT   = 0,
-        RIGHT  = 1
-    };
 
     /* Events used by AudioTrack callback function (callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
@@ -82,6 +77,7 @@
                                   // (currently ignored, but will make the primary field in future)
 
         size_t      size;         // input/output in bytes == frameCount * frameSize
+                                  // on input it is unused
                                   // on output is the number of bytes actually filled
                                   // FIXME this is redundant with respect to frameCount,
                                   // and TRANSFER_OBTAIN mode is broken for 8-bit data
@@ -91,7 +87,7 @@
             void*       raw;
             short*      i16;      // signed 16-bit
             int8_t*     i8;       // unsigned 8-bit, offset by 0x80
-        };
+        };                        // input: unused, output: pointer to buffer
     };
 
     /* As a convenience, if a callback is supplied, a handler thread
@@ -123,6 +119,8 @@
      *  - NO_ERROR: successful operation
      *  - NO_INIT: audio server or audio hardware not initialized
      *  - BAD_VALUE: unsupported configuration
+     * frameCount is guaranteed to be non-zero if status is NO_ERROR,
+     * and is undefined otherwise.
      */
 
     static status_t getMinFrameCount(size_t* frameCount,
@@ -158,7 +156,7 @@
      * sampleRate:         Data source sampling rate in Hz.
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
-     * channelMask:        Channel mask.
+     * channelMask:        Channel mask, such that audio_is_output_channel(channelMask) is true.
      * frameCount:         Minimum size of track PCM buffer in frames. This defines the
      *                     application's contribution to the
      *                     latency of the track. The actual size selected by the AudioTrack could be
@@ -180,15 +178,16 @@
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t,
-                                    int frameCount       = 0,
+                                    size_t frameCount    = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf       = NULL,
                                     void* user           = NULL,
-                                    int notificationFrames = 0,
-                                    int sessionId        = 0,
+                                    uint32_t notificationFrames = 0,
+                                    int sessionId        = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     const audio_offload_info_t *offloadInfo = NULL,
-                                    int uid = -1);
+                                    int uid = -1,
+                                    pid_t pid = -1);
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
@@ -209,11 +208,12 @@
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf      = NULL,
                                     void* user          = NULL,
-                                    int notificationFrames = 0,
-                                    int sessionId       = 0,
+                                    uint32_t notificationFrames = 0,
+                                    int sessionId       = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     const audio_offload_info_t *offloadInfo = NULL,
-                                    int uid = -1);
+                                    int uid = -1,
+                                    pid_t pid = -1);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
@@ -241,17 +241,18 @@
                             uint32_t sampleRate,
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
-                            int frameCount      = 0,
+                            size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                             callback_t cbf      = NULL,
                             void* user          = NULL,
-                            int notificationFrames = 0,
+                            uint32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
-                            int sessionId       = 0,
+                            int sessionId       = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             const audio_offload_info_t *offloadInfo = NULL,
-                            int uid = -1);
+                            int uid = -1,
+                            pid_t pid = -1);
 
     /* Result of constructing the AudioTrack. This must be checked for successful initialization
      * before using any AudioTrack API (except for set()), because using
@@ -279,7 +280,7 @@
             size_t      frameSize() const   { return mFrameSize; }
 
             uint32_t    channelCount() const { return mChannelCount; }
-            uint32_t    frameCount() const  { return mFrameCount; }
+            size_t      frameCount() const  { return mFrameCount; }
 
     /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
             sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
@@ -336,7 +337,7 @@
      */
             status_t    setSampleRate(uint32_t sampleRate);
 
-    /* Return current source sample rate in Hz, or 0 if unknown */
+    /* Return current source sample rate in Hz */
             uint32_t    getSampleRate() const;
 
     /* Enables looping and sets the start and end points of looping.
@@ -361,7 +362,7 @@
     /* Sets marker position. When playback reaches the number of frames specified, a callback with
      * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
      * notification callback.  To set a marker at a position which would compute as 0,
-     * a workaround is to the set the marker at a nearby position such as ~0 or 1.
+     * a workaround is to set the marker at a nearby position such as ~0 or 1.
      * If the AudioTrack has been opened with no callback function associated, the operation will
      * fail.
      *
@@ -450,9 +451,10 @@
      *  none.
      *
      * Returned value:
-     *  handle on audio hardware output
+     *  handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
+     *  track needed to be re-created but that failed
      */
-            audio_io_handle_t    getOutput();
+            audio_io_handle_t    getOutput() const;
 
     /* Returns the unique session ID associated with this track.
      *
@@ -528,15 +530,6 @@
                                      struct timespec *elapsed = NULL, size_t *nonContig = NULL);
 public:
 
-//EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy
-//            enum {
-//            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
-//            TEAR_DOWN       = 0x80000002,
-//            STOPPED = 1,
-//            STREAM_END_WAIT,
-//            STREAM_END
-//        };
-
     /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
     // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
@@ -551,8 +544,11 @@
      *      WOULD_BLOCK         when obtainBuffer() returns same, or
      *                          AudioTrack was stopped during the write
      *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
+     * Default behavior is to block until all data has been transferred. Set 'blocking' to
+     * false for the method to return immediately without waiting to try multiple times to write
+     * the full content of the buffer.
      */
-            ssize_t     write(const void* buffer, size_t size);
+            ssize_t     write(const void* buffer, size_t size, bool blocking = true);
 
     /*
      * Dumps the state of an audio track.
@@ -566,7 +562,7 @@
             uint32_t    getUnderrunFrames() const;
 
     /* Get the flags */
-            audio_output_flags_t getFlags() const { return mFlags; }
+            audio_output_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
 
     /* Set parameters - only possible when using direct output */
             status_t    setParameters(const String8& keyValuePairs);
@@ -626,53 +622,50 @@
             //      NS_INACTIVE inactive so don't run again until re-started
             //      NS_NEVER    never again
             static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
-            nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread);
-            status_t processStreamEnd(int32_t waitCount);
+            nsecs_t processAudioBuffer();
 
+            bool     isOffloaded() const;
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t createTrack_l(audio_stream_type_t streamType,
-                                 uint32_t sampleRate,
-                                 audio_format_t format,
-                                 size_t frameCount,
-                                 audio_output_flags_t flags,
-                                 const sp<IMemory>& sharedBuffer,
-                                 audio_io_handle_t output,
-                                 size_t epoch);
+            status_t createTrack_l(size_t epoch);
 
             // can only be called when mState != STATE_ACTIVE
             void flush_l();
 
             void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
-            audio_io_handle_t getOutput_l();
 
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreTrack_l(const char *from);
 
-            bool     isOffloaded() const
+            bool     isOffloaded_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
 
-    // Next 3 fields may be changed if IAudioTrack is re-created, but always != 0
+    // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
     audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
+    audio_io_handle_t       mOutput;                // returned by AudioSystem::getOutput()
 
     sp<AudioTrackThread>    mAudioTrackThread;
+
     float                   mVolume[2];
     float                   mSendLevel;
     mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it.
-    size_t                  mFrameCount;            // corresponds to current IAudioTrack
-    size_t                  mReqFrameCount;         // frame count to request the next time a new
-                                                    // IAudioTrack is needed
-
+    size_t                  mFrameCount;            // corresponds to current IAudioTrack, value is
+                                                    // reported back by AudioFlinger to the client
+    size_t                  mReqFrameCount;         // frame count to request the first or next time
+                                                    // a new IAudioTrack is needed, non-decreasing
 
     // constant after constructor or set()
     audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
     audio_stream_type_t     mStreamType;
     uint32_t                mChannelCount;
     audio_channel_mask_t    mChannelMask;
+    sp<IMemory>             mSharedBuffer;
     transfer_type           mTransfer;
+    audio_offload_info_t    mOffloadInfoCopy;
+    const audio_offload_info_t* mOffloadInfo;
 
     // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.  For 8-bit PCM data, it's
     // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
@@ -705,21 +698,25 @@
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback,
                                                     // at initial source sample rate
-    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh next 2
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
+                                                    // mRemainingFrames and mRetryOnPartialBuffer
 
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
     uint32_t                mObservedSequence;      // last observed value of mSequence
 
-    sp<IMemory>             mSharedBuffer;
     uint32_t                mLoopPeriod;            // in frames, zero means looping is disabled
+
     uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
     audio_output_flags_t    mFlags;
+        // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
+        // mLock must be held to read or write those bits reliably.
+
     int                     mSessionId;
     int                     mAuxEffectId;
 
@@ -739,7 +736,6 @@
     sp<AudioTrackClientProxy>       mProxy;         // primary owner of the memory
 
     bool                    mInUnderrun;            // whether track is currently in underrun state
-    String8                 mName;                  // server's name for this IAudioTrack
     uint32_t                mPausedPosition;
 
 private:
@@ -754,8 +750,8 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
-    audio_io_handle_t       mOutput;                // cached output io handle
     int                     mClientUid;
+    pid_t                   mClientPid;
 };
 
 class TimedAudioTrack : public AudioTrack
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 282f275..9101f06 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -64,25 +64,27 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 track_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
+                                // On successful return, AudioFlinger takes over the handle
+                                // reference and will release it when the track is destroyed.
+                                // However on failure, the client is responsible for release.
                                 audio_io_handle_t output,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 int *sessionId,
-                                // input: ignored
-                                // output: server's description of IAudioTrack for display in logs.
-                                // Don't attempt to parse, as the format could change.
-                                String8& name,
                                 int clientUid,
                                 status_t *status) = 0;
 
     virtual sp<IAudioRecord> openRecord(
+                                // On successful return, AudioFlinger takes over the handle
+                                // reference and will release it when the track is destroyed.
+                                // However on failure, the client is responsible for release.
                                 audio_io_handle_t input,
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 track_flags_t *flags,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 int *sessionId,
@@ -163,7 +165,7 @@
                                         audio_channel_mask_t *pChannelMask) = 0;
     virtual status_t closeInput(audio_io_handle_t input) = 0;
 
-    virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output) = 0;
+    virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
 
     virtual status_t setVoiceVolume(float volume) = 0;
 
@@ -174,8 +176,8 @@
 
     virtual int newAudioSessionId() = 0;
 
-    virtual void acquireAudioSessionId(int audioSession) = 0;
-    virtual void releaseAudioSessionId(int audioSession) = 0;
+    virtual void acquireAudioSessionId(int audioSession, pid_t pid) = 0;
+    virtual void releaseAudioSessionId(int audioSession, pid_t pid) = 0;
 
     virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
 
@@ -188,6 +190,7 @@
                                     effect_descriptor_t *pDesc,
                                     const sp<IEffectClient>& client,
                                     int32_t priority,
+                                    // AudioFlinger doesn't take over handle reference from client
                                     audio_io_handle_t output,
                                     int sessionId,
                                     status_t *status,
diff --git a/include/media/IMediaHTTPConnection.h b/include/media/IMediaHTTPConnection.h
new file mode 100644
index 0000000..2a63eb7
--- /dev/null
+++ b/include/media/IMediaHTTPConnection.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef I_MEDIA_HTTP_CONNECTION_H_
+
+#define I_MEDIA_HTTP_CONNECTION_H_
+
+#include <binder/IInterface.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+struct IMediaHTTPConnection;
+
+/** MUST stay in sync with IMediaHTTPConnection.aidl */
+
+struct IMediaHTTPConnection : public IInterface {
+    DECLARE_META_INTERFACE(MediaHTTPConnection);
+
+    virtual bool connect(
+            const char *uri, const KeyedVector<String8, String8> *headers) = 0;
+
+    virtual void disconnect() = 0;
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+    virtual off64_t getSize() = 0;
+    virtual status_t getMIMEType(String8 *mimeType) = 0;
+    virtual status_t getUri(String8 *uri) = 0;
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(IMediaHTTPConnection);
+};
+
+}  // namespace android
+
+#endif  // I_MEDIA_HTTP_CONNECTION_H_
diff --git a/include/media/IMediaHTTPService.h b/include/media/IMediaHTTPService.h
new file mode 100644
index 0000000..f66d6c8
--- /dev/null
+++ b/include/media/IMediaHTTPService.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef I_MEDIA_HTTP_SERVICE_H_
+
+#define I_MEDIA_HTTP_SERVICE_H_
+
+#include <binder/IInterface.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct IMediaHTTPConnection;
+
+/** MUST stay in sync with IMediaHTTPService.aidl */
+
+struct IMediaHTTPService : public IInterface {
+    DECLARE_META_INTERFACE(MediaHTTPService);
+
+    virtual sp<IMediaHTTPConnection> makeHTTPConnection() = 0;
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(IMediaHTTPService);
+};
+
+}  // namespace android
+
+#endif  // I_MEDIA_HTTP_SERVICE_H_
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index 6dbb2d7..2529800 100644
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -26,6 +26,8 @@
 
 namespace android {
 
+struct IMediaHTTPService;
+
 class IMediaMetadataRetriever: public IInterface
 {
 public:
@@ -33,6 +35,7 @@
     virtual void            disconnect() = 0;
 
     virtual status_t        setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *srcUrl,
             const KeyedVector<String8, String8> *headers = NULL) = 0;
 
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 0cbd269..db62cd5 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -33,6 +33,7 @@
 class Surface;
 class IStreamSource;
 class IGraphicBufferProducer;
+struct IMediaHTTPService;
 
 class IMediaPlayer: public IInterface
 {
@@ -41,8 +42,11 @@
 
     virtual void            disconnect() = 0;
 
-    virtual status_t        setDataSource(const char *url,
-                                    const KeyedVector<String8, String8>* headers) = 0;
+    virtual status_t        setDataSource(
+            const sp<IMediaHTTPService> &httpService,
+            const char *url,
+            const KeyedVector<String8, String8>* headers) = 0;
+
     virtual status_t        setDataSource(int fd, int64_t offset, int64_t length) = 0;
     virtual status_t        setDataSource(const sp<IStreamSource>& source) = 0;
     virtual status_t        setVideoSurfaceTexture(
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 2998b37..5b45376 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -34,6 +34,7 @@
 struct ICrypto;
 struct IDrm;
 struct IHDCP;
+struct IMediaHTTPService;
 class IMediaRecorder;
 class IOMX;
 class IRemoteDisplay;
@@ -49,9 +50,14 @@
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
     virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0;
 
-    virtual status_t         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
-                                    audio_format_t* pFormat,
-                                    const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
+    virtual status_t         decode(
+            const sp<IMediaHTTPService> &httpService,
+            const char* url,
+            uint32_t *pSampleRate,
+            int* pNumChannels,
+            audio_format_t* pFormat,
+            const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
+
     virtual status_t         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
                                     int* pNumChannels, audio_format_t* pFormat,
                                     const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
@@ -93,9 +99,6 @@
 
     virtual void addBatteryData(uint32_t params) = 0;
     virtual status_t pullBatteryData(Parcel* reply) = 0;
-
-    virtual status_t updateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 6643736..f6f9e7a 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -143,6 +143,8 @@
         INTERNAL_OPTION_SUSPEND,  // data is a bool
         INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,  // data is an int64_t
         INTERNAL_OPTION_MAX_TIMESTAMP_GAP, // data is int64_t
+        INTERNAL_OPTION_START_TIME, // data is an int64_t
+        INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
     };
     virtual status_t setInternalOption(
             node_id node,
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index ecc3b65..bb6b97b 100644
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -24,6 +24,8 @@
 
 namespace android {
 
+struct IMediaHTTPService;
+
 // Abstract base class
 class MediaMetadataRetrieverBase : public RefBase
 {
@@ -32,6 +34,7 @@
     virtual             ~MediaMetadataRetrieverBase() {}
 
     virtual status_t    setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *url,
             const KeyedVector<String8, String8> *headers = NULL) = 0;
 
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 26d8729..87717da 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -137,6 +137,7 @@
     }
 
     virtual status_t    setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *url,
             const KeyedVector<String8, String8> *headers = NULL) = 0;
 
@@ -213,11 +214,6 @@
         return INVALID_OPERATION;
     }
 
-    virtual status_t updateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList) {
-        return INVALID_OPERATION;
-    }
-
 private:
     friend class MediaPlayerService;
 
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
index 0df77c1..b35cf32 100644
--- a/include/media/mediametadataretriever.h
+++ b/include/media/mediametadataretriever.h
@@ -25,6 +25,7 @@
 
 namespace android {
 
+struct IMediaHTTPService;
 class IMediaPlayerService;
 class IMediaMetadataRetriever;
 
@@ -68,6 +69,7 @@
     void disconnect();
 
     status_t setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *dataSourceUrl,
             const KeyedVector<String8, String8> *headers = NULL);
 
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 4c05fc3..3ca3095 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -189,6 +189,8 @@
     virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) = 0;
 };
 
+struct IMediaHTTPService;
+
 class MediaPlayer : public BnMediaPlayerClient,
                     public virtual IMediaDeathNotifier
 {
@@ -199,6 +201,7 @@
             void            disconnect();
 
             status_t        setDataSource(
+                    const sp<IMediaHTTPService> &httpService,
                     const char *url,
                     const KeyedVector<String8, String8> *headers);
 
@@ -220,13 +223,19 @@
             status_t        getDuration(int *msec);
             status_t        reset();
             status_t        setAudioStreamType(audio_stream_type_t type);
+            status_t        getAudioStreamType(audio_stream_type_t *type);
             status_t        setLooping(int loop);
             bool            isLooping();
             status_t        setVolume(float leftVolume, float rightVolume);
             void            notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
-    static  status_t        decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
-                                   audio_format_t* pFormat,
-                                   const sp<IMemoryHeap>& heap, size_t *pSize);
+    static  status_t        decode(
+            const sp<IMediaHTTPService> &httpService,
+            const char* url,
+            uint32_t *pSampleRate,
+            int* pNumChannels,
+            audio_format_t* pFormat,
+            const sp<IMemoryHeap>& heap,
+            size_t *pSize);
     static  status_t        decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
                                    int* pNumChannels, audio_format_t* pFormat,
                                    const sp<IMemoryHeap>& heap, size_t *pSize);
@@ -242,9 +251,6 @@
             status_t        setRetransmitEndpoint(const char* addrString, uint16_t port);
             status_t        setNextMediaPlayer(const sp<MediaPlayer>& player);
 
-            status_t updateProxyConfig(
-                    const char *host, int32_t port, const char *exclusionList);
-
 private:
             void            clear_l();
             status_t        seekTo_l(int msec);
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 88a42a0..142cb90 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -39,7 +39,7 @@
 enum video_source {
     VIDEO_SOURCE_DEFAULT = 0,
     VIDEO_SOURCE_CAMERA = 1,
-    VIDEO_SOURCE_GRALLOC_BUFFER = 2,
+    VIDEO_SOURCE_SURFACE = 2,
 
     VIDEO_SOURCE_LIST_END  // must be last - used to validate audio source type
 };
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index a73403b..4537679 100644
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -21,6 +21,7 @@
 #include <utils/threads.h>
 #include <utils/List.h>
 #include <utils/Errors.h>
+#include <utils/String8.h>
 #include <pthread.h>
 
 struct dirent;
@@ -29,6 +30,7 @@
 
 class MediaScannerClient;
 class StringArray;
+class CharacterEncodingDetector;
 
 enum MediaScanResult {
     // This file or directory was scanned successfully.
@@ -94,15 +96,9 @@
     virtual status_t setMimeType(const char* mimeType) = 0;
 
 protected:
-    void convertValues(uint32_t encoding);
-
-protected:
-    // cached name and value strings, for native encoding support.
-    StringArray*    mNames;
-    StringArray*    mValues;
-
-    // default encoding based on MediaScanner::mLocale string
-    uint32_t        mLocaleEncoding;
+    // default encoding from MediaScanner::mLocale
+    String8 mLocale;
+    CharacterEncodingDetector *mEncodingDetector;
 };
 
 }; // namespace android
diff --git a/include/media/nbaio/AudioBufferProviderSource.h b/include/media/nbaio/AudioBufferProviderSource.h
index 2c4aaff..b16e20a 100644
--- a/include/media/nbaio/AudioBufferProviderSource.h
+++ b/include/media/nbaio/AudioBufferProviderSource.h
@@ -27,7 +27,7 @@
 class AudioBufferProviderSource : public NBAIO_Source {
 
 public:
-    AudioBufferProviderSource(AudioBufferProvider *provider, NBAIO_Format format);
+    AudioBufferProviderSource(AudioBufferProvider *provider, const NBAIO_Format& format);
     virtual ~AudioBufferProviderSource();
 
     // NBAIO_Port interface
diff --git a/include/media/nbaio/AudioStreamInSource.h b/include/media/nbaio/AudioStreamInSource.h
index 07d8c89..eaea63c 100644
--- a/include/media/nbaio/AudioStreamInSource.h
+++ b/include/media/nbaio/AudioStreamInSource.h
@@ -43,7 +43,7 @@
 
     // This is an over-estimate, and could dupe the caller into making a blocking read()
     // FIXME Use an audio HAL API to query the buffer filling status when it's available.
-    virtual ssize_t availableToRead() { return mStreamBufferSizeBytes >> mBitShift; }
+    virtual ssize_t availableToRead() { return mStreamBufferSizeBytes / mFrameSize; }
 
     virtual ssize_t read(void *buffer, size_t count);
 
diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h
index 7948d40..9949b88 100644
--- a/include/media/nbaio/AudioStreamOutSink.h
+++ b/include/media/nbaio/AudioStreamOutSink.h
@@ -43,7 +43,7 @@
 
     // This is an over-estimate, and could dupe the caller into making a blocking write()
     // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
-    virtual ssize_t availableToWrite() const { return mStreamBufferSizeBytes >> mBitShift; }
+    virtual ssize_t availableToWrite() const { return mStreamBufferSizeBytes / mFrameSize; }
 
     virtual ssize_t write(const void *buffer, size_t count);
 
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
index d3802fe..b09b35f 100644
--- a/include/media/nbaio/MonoPipe.h
+++ b/include/media/nbaio/MonoPipe.h
@@ -41,7 +41,7 @@
     // Note: whatever shares this object with another thread needs to do so in an SMP-safe way (like
     // creating it the object before creating the other thread, or storing the object with a
     // release_store). Otherwise the other thread could see a partially-constructed object.
-    MonoPipe(size_t reqFrames, NBAIO_Format format, bool writeCanBlock = false);
+    MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock = false);
     virtual ~MonoPipe();
 
     // NBAIO_Port interface
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index 1da0c73..be0c15b 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -29,6 +29,7 @@
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <media/AudioTimestamp.h>
+#include <system/audio.h>
 
 namespace android {
 
@@ -52,31 +53,41 @@
 // the combinations that are actually needed within AudioFlinger.  If the list of combinations grows
 // too large, then this decision should be re-visited.
 // Sample rate and channel count are explicit, PCM interleaved 16-bit is assumed.
-typedef unsigned NBAIO_Format;
-enum {
-    Format_Invalid
+struct NBAIO_Format {
+// FIXME make this a class, and change Format_... global methods to class methods
+//private:
+    unsigned    mSampleRate;
+    unsigned    mChannelCount;
+    audio_format_t  mFormat;
+    size_t      mFrameSize;
 };
 
-// Return the frame size of an NBAIO_Format in bytes
-size_t Format_frameSize(NBAIO_Format format);
+extern const NBAIO_Format Format_Invalid;
 
-// Return the frame size of an NBAIO_Format as a bit shift
-size_t Format_frameBitShift(NBAIO_Format format);
+// Return the frame size of an NBAIO_Format in bytes
+size_t Format_frameSize(const NBAIO_Format& format);
 
 // Convert a sample rate in Hz and channel count to an NBAIO_Format
-NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount);
+// FIXME rename
+NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount, audio_format_t format);
 
 // Return the sample rate in Hz of an NBAIO_Format
-unsigned Format_sampleRate(NBAIO_Format format);
+unsigned Format_sampleRate(const NBAIO_Format& format);
 
 // Return the channel count of an NBAIO_Format
-unsigned Format_channelCount(NBAIO_Format format);
+unsigned Format_channelCount(const NBAIO_Format& format);
 
 // Callbacks used by NBAIO_Sink::writeVia() and NBAIO_Source::readVia() below.
 typedef ssize_t (*writeVia_t)(void *user, void *buffer, size_t count);
 typedef ssize_t (*readVia_t)(void *user, const void *buffer,
                              size_t count, int64_t readPTS);
 
+// Check whether an NBAIO_Format is valid
+bool Format_isValid(const NBAIO_Format& format);
+
+// Compare two NBAIO_Format values
+bool Format_isEqual(const NBAIO_Format& format1, const NBAIO_Format& format2);
+
 // Abstract class (interface) representing a data port.
 class NBAIO_Port : public RefBase {
 
@@ -115,15 +126,15 @@
     virtual NBAIO_Format format() const { return mNegotiated ? mFormat : Format_Invalid; }
 
 protected:
-    NBAIO_Port(NBAIO_Format format) : mNegotiated(false), mFormat(format),
-                                      mBitShift(Format_frameBitShift(format)) { }
+    NBAIO_Port(const NBAIO_Format& format) : mNegotiated(false), mFormat(format),
+                                             mFrameSize(Format_frameSize(format)) { }
     virtual ~NBAIO_Port() { }
 
     // Implementations are free to ignore these if they don't need them
 
     bool            mNegotiated;    // mNegotiated implies (mFormat != Format_Invalid)
     NBAIO_Format    mFormat;        // (mFormat != Format_Invalid) does not imply mNegotiated
-    size_t          mBitShift;      // assign in parallel with any assignment to mFormat
+    size_t          mFrameSize;     // assign in parallel with any assignment to mFormat
 };
 
 // Abstract class (interface) representing a non-blocking data sink, for use by a data provider.
@@ -220,7 +231,7 @@
     virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
 
 protected:
-    NBAIO_Sink(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { }
+    NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { }
     virtual ~NBAIO_Sink() { }
 
     // Implementations are free to ignore these if they don't need them
@@ -311,7 +322,7 @@
     virtual void    onTimestamp(const AudioTimestamp& timestamp) { }
 
 protected:
-    NBAIO_Source(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { }
+    NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { }
     virtual ~NBAIO_Source() { }
 
     // Implementations are free to ignore these if they don't need them
diff --git a/include/media/nbaio/Pipe.h b/include/media/nbaio/Pipe.h
index 79a4eee..c784129 100644
--- a/include/media/nbaio/Pipe.h
+++ b/include/media/nbaio/Pipe.h
@@ -30,7 +30,7 @@
 
 public:
     // maxFrames will be rounded up to a power of 2, and all slots are available. Must be >= 2.
-    Pipe(size_t maxFrames, NBAIO_Format format);
+    Pipe(size_t maxFrames, const NBAIO_Format& format);
     virtual ~Pipe();
 
     // NBAIO_Port interface
diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h
index cdfb6fe..daf6bc3 100644
--- a/include/media/nbaio/SourceAudioBufferProvider.h
+++ b/include/media/nbaio/SourceAudioBufferProvider.h
@@ -41,7 +41,7 @@
 
 private:
     const sp<NBAIO_Source> mSource;     // the wrapped source
-    /*const*/ size_t    mFrameBitShift; // log2(frame size in bytes)
+    /*const*/ size_t    mFrameSize; // frame size in bytes
     void*               mAllocated; // pointer to base of allocated memory
     size_t              mSize;      // size of mAllocated in frames
     size_t              mOffset;    // frame offset within mAllocated of valid data
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 7ba5acc..8ec7f1c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -67,8 +67,6 @@
 
     void signalRequestIDRFrame();
 
-    bool isConfiguredForAdaptivePlayback() { return mIsConfiguredForAdaptivePlayback; }
-
     struct PortDescription : public RefBase {
         size_t countBuffers();
         IOMX::buffer_id bufferIDAt(size_t index) const;
@@ -178,6 +176,8 @@
     sp<MemoryDealer> mDealer[2];
 
     sp<ANativeWindow> mNativeWindow;
+    sp<AMessage> mInputFormat;
+    sp<AMessage> mOutputFormat;
 
     Vector<BufferInfo> mBuffers[2];
     bool mPortEOS[2];
@@ -189,7 +189,7 @@
     bool mIsEncoder;
     bool mUseMetadataOnEncoderOutput;
     bool mShutdownInProgress;
-    bool mIsConfiguredForAdaptivePlayback;
+    bool mExplicitShutdown;
 
     // If "mKeepComponentAllocated" we only transition back to Loaded state
     // and do not release the component instance.
@@ -203,10 +203,16 @@
     unsigned mDequeueCounter;
     bool mStoreMetaDataInOutputBuffers;
     int32_t mMetaDataBuffersToSubmit;
+    size_t mNumUndequeuedBuffers;
 
     int64_t mRepeatFrameDelayUs;
     int64_t mMaxPtsGapUs;
 
+    int64_t mTimePerFrameUs;
+    int64_t mTimePerCaptureUs;
+
+    bool mCreateInputBuffersSuspended;
+
     status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
     status_t allocateBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffersOnPort(OMX_U32 portIndex);
@@ -300,6 +306,7 @@
     void processDeferredMessages();
 
     void sendFormatChange(const sp<AMessage> &reply);
+    status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify);
 
     void signalError(
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index a829916..dd0a106 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -172,7 +172,7 @@
                  const sp<IGraphicBufferProducer>& surface,
                  bool storeMetaDataInVideoBuffers);
 
-    virtual void startCameraRecording();
+    virtual status_t startCameraRecording();
     virtual void releaseRecordingFrame(const sp<IMemory>& frame);
 
     // Returns true if need to skip the current frame.
@@ -185,6 +185,8 @@
     virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
             const sp<IMemory> &data);
 
+    void releaseCamera();
+
 private:
     friend class CameraSourceListener;
 
@@ -233,7 +235,6 @@
                     int32_t frameRate);
 
     void stopCameraRecording();
-    void releaseCamera();
     status_t reset();
 
     CameraSource(const CameraSource &);
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 157b1aa..f8787dd 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -31,6 +31,7 @@
 namespace android {
 
 struct AMessage;
+struct IMediaHTTPService;
 class String8;
 
 class DataSource : public RefBase {
@@ -43,6 +44,7 @@
     };
 
     static sp<DataSource> CreateFromURI(
+            const sp<IMediaHTTPService> &httpService,
             const char *uri,
             const KeyedVector<String8, String8> *headers = NULL);
 
diff --git a/include/media/stagefright/DataURISource.h b/include/media/stagefright/DataURISource.h
new file mode 100644
index 0000000..693562e
--- /dev/null
+++ b/include/media/stagefright/DataURISource.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_URI_SOURCE_H_
+
+#define DATA_URI_SOURCE_H_
+
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct ABuffer;
+
+struct DataURISource : public DataSource {
+    static sp<DataURISource> Create(const char *uri);
+
+    virtual status_t initCheck() const;
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+    virtual status_t getSize(off64_t *size);
+
+protected:
+    virtual ~DataURISource();
+
+private:
+    sp<ABuffer> mBuffer;
+
+    DataURISource(const sp<ABuffer> &buffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(DataURISource);
+};
+
+}  // namespace android
+
+#endif  // DATA_URI_SOURCE_H_
+
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index be152e7..a981d1c 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -30,6 +30,7 @@
 class FileSource : public DataSource {
 public:
     FileSource(const char *filename);
+    // FileSource takes ownership and will close the fd
     FileSource(int fd, int64_t offset, int64_t length);
 
     virtual status_t initCheck() const;
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 76aa503..276543b 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -106,6 +106,7 @@
     status_t signalEndOfInputStream();
 
     status_t getOutputFormat(sp<AMessage> *format) const;
+    status_t getInputFormat(sp<AMessage> *format) const;
 
     status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const;
     status_t getOutputBuffers(Vector<sp<ABuffer> > *buffers) const;
@@ -159,6 +160,7 @@
         kWhatGetBuffers                     = 'getB',
         kWhatFlush                          = 'flus',
         kWhatGetOutputFormat                = 'getO',
+        kWhatGetInputFormat                 = 'getI',
         kWhatDequeueInputTimedOut           = 'dITO',
         kWhatDequeueOutputTimedOut          = 'dOTO',
         kWhatCodecNotify                    = 'codc',
@@ -199,6 +201,7 @@
     sp<Surface> mNativeWindow;
     SoftwareRenderer *mSoftRenderer;
     sp<AMessage> mOutputFormat;
+    sp<AMessage> mInputFormat;
 
     List<size_t> mAvailPortBuffers[2];
     Vector<BufferInfo> mPortBuffers[2];
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 590623b..01a5daf 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -60,6 +60,7 @@
         SECTION_DECODER,
         SECTION_ENCODERS,
         SECTION_ENCODER,
+        SECTION_INCLUDE,
     };
 
     struct CodecInfo {
@@ -73,7 +74,9 @@
 
     status_t mInitCheck;
     Section mCurrentSection;
+    Vector<Section> mPastSections;
     int32_t mDepth;
+    AString mHrefBase;
 
     Vector<CodecInfo> mCodecInfos;
     KeyedVector<AString, size_t> mCodecQuirks;
@@ -83,7 +86,8 @@
     ~MediaCodecList();
 
     status_t initCheck() const;
-    void parseXMLFile(FILE *file);
+    void parseXMLFile(const char *path);
+    void parseTopLevelXMLFile(const char *path);
 
     static void StartElementHandlerWrapper(
             void *me, const char *name, const char **attrs);
@@ -93,6 +97,7 @@
     void startElementHandler(const char *name, const char **attrs);
     void endElementHandler(const char *name);
 
+    status_t includeXMLFile(const char **attrs);
     status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
     void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
 
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
new file mode 100644
index 0000000..4b18a0b
--- /dev/null
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MediaCodecSource_H_
+#define MediaCodecSource_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/MediaSource.h>
+
+namespace android {
+
+class ALooper;
+class AMessage;
+class IGraphicBufferProducer;
+class MediaCodec;
+class MetaData;
+
+struct MediaCodecSource : public MediaSource,
+                          public MediaBufferObserver {
+    enum FlagBits {
+        FLAG_USE_SURFACE_INPUT      = 1,
+        FLAG_USE_METADATA_INPUT     = 2,
+    };
+
+    static sp<MediaCodecSource> Create(
+            const sp<ALooper> &looper,
+            const sp<AMessage> &format,
+            const sp<MediaSource> &source,
+            uint32_t flags = 0);
+
+    bool isVideo() const { return mIsVideo; }
+    sp<IGraphicBufferProducer> getGraphicBufferProducer();
+
+    // MediaSource
+    virtual status_t start(MetaData *params = NULL);
+    virtual status_t stop();
+    virtual status_t pause();
+    virtual sp<MetaData> getFormat() { return mMeta; }
+    virtual status_t read(
+            MediaBuffer **buffer,
+            const ReadOptions *options = NULL);
+
+    // MediaBufferObserver
+    virtual void signalBufferReturned(MediaBuffer *buffer);
+
+    // for AHandlerReflector
+    void onMessageReceived(const sp<AMessage> &msg);
+
+protected:
+    virtual ~MediaCodecSource();
+
+private:
+    struct Puller;
+
+    enum {
+        kWhatPullerNotify,
+        kWhatEncoderActivity,
+        kWhatStart,
+        kWhatStop,
+        kWhatPause,
+    };
+
+    MediaCodecSource(
+            const sp<ALooper> &looper,
+            const sp<AMessage> &outputFormat,
+            const sp<MediaSource> &source,
+            uint32_t flags = 0);
+
+    status_t onStart(MetaData *params);
+    status_t init();
+    status_t initEncoder();
+    void releaseEncoder();
+    status_t feedEncoderInputBuffers();
+    void scheduleDoMoreWork();
+    status_t doMoreWork();
+    void suspend();
+    void resume(int64_t skipFramesBeforeUs = -1ll);
+    void signalEOS(status_t err = ERROR_END_OF_STREAM);
+    bool reachedEOS();
+    status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
+
+    sp<ALooper> mLooper;
+    sp<ALooper> mCodecLooper;
+    sp<AHandlerReflector<MediaCodecSource> > mReflector;
+    sp<AMessage> mOutputFormat;
+    sp<MetaData> mMeta;
+    sp<Puller> mPuller;
+    sp<MediaCodec> mEncoder;
+    uint32_t mFlags;
+    List<uint32_t> mStopReplyIDQueue;
+    bool mIsVideo;
+    bool mStarted;
+    bool mStopping;
+    bool mDoMoreWorkPending;
+    bool mPullerReachedEOS;
+    sp<AMessage> mEncoderActivityNotify;
+    sp<IGraphicBufferProducer> mGraphicBufferProducer;
+    Vector<sp<ABuffer> > mEncoderInputBuffers;
+    Vector<sp<ABuffer> > mEncoderOutputBuffers;
+    List<MediaBuffer *> mInputBufferQueue;
+    List<size_t> mAvailEncoderInputIndices;
+    List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
+
+    // audio drift time
+    int64_t mFirstSampleTimeUs;
+    List<int64_t> mDriftTimeQueue;
+
+    // following variables are protected by mOutputBufferLock
+    Mutex mOutputBufferLock;
+    Condition mOutputBufferCond;
+    List<MediaBuffer*> mOutputBufferQueue;
+    bool mEncodedReachedEOS;
+    status_t mErrorCode;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaCodecSource);
+};
+
+} // namespace android
+
+#endif /* MediaCodecSource_H_ */
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index cf5beda..678d642 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -38,6 +38,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
 extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
 extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
+extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
 extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
 extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
 extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
diff --git a/include/media/stagefright/MediaHTTP.h b/include/media/stagefright/MediaHTTP.h
new file mode 100644
index 0000000..006d8d8
--- /dev/null
+++ b/include/media/stagefright/MediaHTTP.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_H_
+
+#define MEDIA_HTTP_H_
+
+#include <media/stagefright/foundation/AString.h>
+
+#include "include/HTTPBase.h"
+
+namespace android {
+
+struct IMediaHTTPConnection;
+
+struct MediaHTTP : public HTTPBase {
+    MediaHTTP(const sp<IMediaHTTPConnection> &conn);
+
+    virtual status_t connect(
+            const char *uri,
+            const KeyedVector<String8, String8> *headers,
+            off64_t offset);
+
+    virtual void disconnect();
+
+    virtual status_t initCheck() const;
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+
+    virtual status_t getSize(off64_t *size);
+
+    virtual uint32_t flags();
+
+    virtual status_t reconnectAtOffset(off64_t offset);
+
+protected:
+    virtual ~MediaHTTP();
+
+    virtual sp<DecryptHandle> DrmInitialization(const char* mime);
+    virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
+    virtual String8 getUri();
+    virtual String8 getMIMEType() const;
+
+private:
+    status_t mInitCheck;
+    sp<IMediaHTTPConnection> mHTTPConnection;
+
+    KeyedVector<String8, String8> mLastHeaders;
+    AString mLastURI;
+
+    bool mCachedSizeValid;
+    off64_t mCachedSize;
+
+    sp<DecryptHandle> mDecryptHandle;
+    DrmManagerClient *mDrmManagerClient;
+
+    void clearDRMState_l();
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaHTTP);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_HTTP_H_
diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h
index ff6a66e..bbe4303 100644
--- a/include/media/stagefright/MediaMuxer.h
+++ b/include/media/stagefright/MediaMuxer.h
@@ -30,7 +30,7 @@
 struct MediaBuffer;
 struct MediaSource;
 struct MetaData;
-struct MPEG4Writer;
+struct MediaWriter;
 
 // MediaMuxer is used to mux multiple tracks into a video. Currently, we only
 // support a mp4 file as the output.
@@ -44,6 +44,7 @@
     // OutputFormat is updated.
     enum OutputFormat {
         OUTPUT_FORMAT_MPEG_4 = 0,
+        OUTPUT_FORMAT_WEBM   = 1,
         OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
     };
 
@@ -115,7 +116,8 @@
                              int64_t timeUs, uint32_t flags) ;
 
 private:
-    sp<MPEG4Writer> mWriter;
+    const OutputFormat mFormat;
+    sp<MediaWriter> mWriter;
     Vector< sp<MediaAdapter> > mTrackList;  // Each track has its MediaAdapter.
     sp<MetaData> mFileMeta;  // Metadata for the whole file.
 
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index db8216b..e862ec3 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -56,6 +56,9 @@
     kKeyD263              = 'd263',  // raw data
     kKeyVorbisInfo        = 'vinf',  // raw data
     kKeyVorbisBooks       = 'vboo',  // raw data
+    kKeyOpusHeader        = 'ohdr',  // raw data
+    kKeyOpusCodecDelay    = 'ocod',  // uint64_t (codec delay in ns)
+    kKeyOpusSeekPreRoll   = 'ospr',  // uint64_t (seek preroll in ns)
     kKeyWantsNALFragments = 'NALf',
     kKeyIsSyncFrame       = 'sync',  // int32_t (bool)
     kKeyIsCodecConfig     = 'conf',  // int32_t (bool)
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index 5ae6f6b..402e7f8 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -31,6 +31,7 @@
 struct ABuffer;
 struct AMessage;
 struct DataSource;
+struct IMediaHTTPService;
 struct MediaBuffer;
 struct MediaExtractor;
 struct MediaSource;
@@ -45,6 +46,7 @@
     NuMediaExtractor();
 
     status_t setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *path,
             const KeyedVector<String8, String8> *headers = NULL);
 
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 2653b53..098aa69 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -47,6 +47,7 @@
  private:
     void write(const char *src, size_t num);
     size_t read(char *dst, size_t num);
+    int32_t mSkip;
     int32_t mFrontPadding;
     int32_t mBackPadding;
     int32_t mWriteHead;
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index db5f947..43b75fd 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -111,7 +111,7 @@
     // pass metadata through the buffers. Currently, it is force set to true
     bool isMetaDataStoredInVideoBuffers() const;
 
-    sp<BufferQueue> getBufferQueue() const { return mBufferQueue; }
+    sp<IGraphicBufferProducer> getProducer() const { return mProducer; }
 
     // To be called before start()
     status_t setMaxAcquiredBufferCount(size_t count);
@@ -139,12 +139,17 @@
     // frames is separate than the one calling stop.
     virtual void onBuffersReleased();
 
+    // SurfaceMediaSource can't handle sideband streams, so this is not expected
+    // to ever be called. Does nothing.
+    virtual void onSidebandStreamChanged();
+
     static bool isExternalFormat(uint32_t format);
 
 private:
-    // mBufferQueue is the exchange point between the producer and
-    // this consumer
-    sp<BufferQueue> mBufferQueue;
+    // A BufferQueue, represented by these interfaces, is the exchange point
+    // between the producer and this consumer
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
 
     struct SlotData {
         sp<GraphicBuffer> mGraphicBuffer;
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index bbad271..c85368f 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -60,6 +60,8 @@
 bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
                       bool isStreaming, audio_stream_type_t streamType);
 
+AString uriDebugString(const AString &uri, bool incognito = false);
+
 }  // namespace android
 
 #endif  // UTILS_H_
diff --git a/include/media/stagefright/foundation/AString.h b/include/media/stagefright/foundation/AString.h
index 0f8f1e1..622028e 100644
--- a/include/media/stagefright/foundation/AString.h
+++ b/include/media/stagefright/foundation/AString.h
@@ -22,10 +22,13 @@
 
 namespace android {
 
+struct String8;
+
 struct AString {
     AString();
     AString(const char *s);
     AString(const char *s, size_t size);
+    AString(const String8 &from);
     AString(const AString &from);
     AString(const AString &from, size_t offset, size_t n);
     ~AString();
diff --git a/include/media/stagefright/timedtext/TimedTextDriver.h b/include/media/stagefright/timedtext/TimedTextDriver.h
index f23c337..37ef674 100644
--- a/include/media/stagefright/timedtext/TimedTextDriver.h
+++ b/include/media/stagefright/timedtext/TimedTextDriver.h
@@ -25,6 +25,7 @@
 namespace android {
 
 class ALooper;
+struct IMediaHTTPService;
 class MediaPlayerBase;
 class MediaSource;
 class Parcel;
@@ -34,7 +35,9 @@
 
 class TimedTextDriver {
 public:
-    TimedTextDriver(const wp<MediaPlayerBase> &listener);
+    TimedTextDriver(
+            const wp<MediaPlayerBase> &listener,
+            const sp<IMediaHTTPService> &httpService);
 
     ~TimedTextDriver();
 
@@ -77,6 +80,7 @@
     sp<ALooper> mLooper;
     sp<TimedTextPlayer> mPlayer;
     wp<MediaPlayerBase> mListener;
+    sp<IMediaHTTPService> mHTTPService;
 
     // Variables to be guarded by mLock.
     State mState;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 2d033e6..3901e79 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -48,7 +48,7 @@
 #define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client
 
 //EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
-#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 //assuming upto a maximum of 20 seconds of offloaded
+#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
 
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
@@ -98,11 +98,7 @@
                                         // The value should be used "for entertainment purposes only",
                                         // which means don't make important decisions based on it.
 
-                size_t      frameCount_;    // used during creation to pass actual track buffer size
-                                            // from AudioFlinger to client, and not referenced again
-                                            // FIXME remove here and replace by createTrack() in/out
-                                            // parameter
-                                            // renamed to "_" to detect incorrect use
+                uint32_t    mPad1;      // unused
 
     volatile    int32_t     mFutex;     // event flag: down (P) by client,
                                         // up (V) by server or binderDied() or interrupt()
diff --git a/libvideoeditor/Android.mk b/libvideoeditor/Android.mk
deleted file mode 100755
index 5053e7d..0000000
--- a/libvideoeditor/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk
deleted file mode 100755
index 06c2e6a..0000000
--- a/libvideoeditor/lvpp/Android.mk
+++ /dev/null
@@ -1,104 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libvideoeditorplayer
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE:= libvideoeditorplayer
-
-LOCAL_SRC_FILES:=          \
-    VideoEditorTools.cpp \
-    VideoEditorPlayer.cpp \
-    PreviewPlayer.cpp \
-    VideoEditorAudioPlayer.cpp \
-    VideoEditorPreviewController.cpp \
-    VideoEditorSRC.cpp \
-    DummyAudioSource.cpp \
-    DummyVideoSource.cpp \
-    VideoEditorBGAudioProcessing.cpp \
-    PreviewRenderer.cpp \
-    I420ColorConverter.cpp \
-    NativeWindowRenderer.cpp
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_STATIC_LIBRARIES := \
-    libstagefright_color_conversion
-
-
-
-LOCAL_SHARED_LIBRARIES :=     \
-    libaudioresampler         \
-    libaudioutils             \
-    libbinder                 \
-    libcutils                 \
-    liblog                    \
-    libEGL                    \
-    libGLESv2                 \
-    libgui                    \
-    libmedia                  \
-    libdrmframework           \
-    libstagefright            \
-    libstagefright_foundation \
-    libstagefright_omx        \
-    libsync                   \
-    libui                     \
-    libutils                  \
-    libvideoeditor_osal       \
-
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/system/media/audio_utils/include \
-    $(TOP)/frameworks/av/media/libmediaplayerservice \
-    $(TOP)/frameworks/av/media/libstagefright \
-    $(TOP)/frameworks/av/media/libstagefright/include \
-    $(TOP)/frameworks/av/media/libstagefright/rtsp \
-    $(call include-path-for, corecg graphics) \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
-    $(TOP)/frameworks/av/libvideoeditor/lvpp \
-    $(TOP)/frameworks/av/services/audioflinger \
-    $(TOP)/frameworks/native/include/media/editor \
-    $(TOP)/frameworks/native/include/media/openmax \
-
-
-LOCAL_SHARED_LIBRARIES += libdl
-
-# All of the shared libraries we link against.
-LOCAL_LDLIBS := \
-    -lpthread -ldl
-
-LOCAL_CFLAGS += -Wno-multichar \
-     -DM4_ENABLE_RENDERINGMODE \
-    -DUSE_STAGEFRIGHT_CODECS \
-    -DUSE_STAGEFRIGHT_AUDIODEC \
-    -DUSE_STAGEFRIGHT_VIDEODEC \
-    -DUSE_STAGEFRIGHT_AUDIOENC \
-    -DUSE_STAGEFRIGHT_VIDEOENC \
-    -DUSE_STAGEFRIGHT_READERS \
-    -DUSE_STAGEFRIGHT_3GPP_READER
-
-include $(BUILD_SHARED_LIBRARY)
-
-#include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/libvideoeditor/lvpp/DummyAudioSource.cpp b/libvideoeditor/lvpp/DummyAudioSource.cpp
deleted file mode 100755
index dbcab68..0000000
--- a/libvideoeditor/lvpp/DummyAudioSource.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "DummyAudioSource"
-#include <utils/Log.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MetaData.h>
-#include "DummyAudioSource.h"
-
-
-namespace android {
-
-//static
-sp<DummyAudioSource> DummyAudioSource::Create(
-        int32_t samplingRate, int32_t channelCount,
-        int64_t frameDurationUs, int64_t audioDurationUs) {
-
-    ALOGV("Create ");
-    return new DummyAudioSource(samplingRate,
-                                channelCount,
-                                frameDurationUs,
-                                audioDurationUs);
-
-}
-
-DummyAudioSource::DummyAudioSource(
-        int32_t samplingRate, int32_t channelCount,
-        int64_t frameDurationUs, int64_t audioDurationUs)
-    : mSamplingRate(samplingRate),
-      mChannelCount(channelCount),
-      mFrameDurationUs(frameDurationUs),
-      mNumberOfSamplePerFrame(0),
-      mAudioDurationUs(audioDurationUs),
-      mTimeStampUs(0),
-      mBufferGroup(NULL) {
-
-    mNumberOfSamplePerFrame = (int32_t)
-            ((1L * mSamplingRate * mFrameDurationUs)/1000000);
-    mNumberOfSamplePerFrame = mNumberOfSamplePerFrame  * mChannelCount;
-
-    ALOGV("Constructor: E");
-    ALOGV("samplingRate = %d", samplingRate);
-    ALOGV("channelCount = %d", channelCount);
-    ALOGV("frameDurationUs = %lld", frameDurationUs);
-    ALOGV("audioDurationUs = %lld", audioDurationUs);
-    ALOGV("mNumberOfSamplePerFrame = %d", mNumberOfSamplePerFrame);
-    ALOGV("Constructor: X");
-}
-
-DummyAudioSource::~DummyAudioSource() {
-    /* Do nothing here? */
-    ALOGV("~DummyAudioSource");
-}
-
-void DummyAudioSource::setDuration(int64_t audioDurationUs) {
-    ALOGV("setDuration: %lld us added to %lld us",
-        audioDurationUs, mAudioDurationUs);
-
-    Mutex::Autolock autoLock(mLock);
-    mAudioDurationUs += audioDurationUs;
-}
-
-status_t DummyAudioSource::start(MetaData *params) {
-    ALOGV("start: E");
-    status_t err = OK;
-
-    mTimeStampUs = 0;
-
-    mBufferGroup = new MediaBufferGroup;
-    mBufferGroup->add_buffer(
-            new MediaBuffer(mNumberOfSamplePerFrame * sizeof(int16_t)));
-
-    ALOGV("start: X");
-
-    return err;
-}
-
-status_t DummyAudioSource::stop() {
-    ALOGV("stop");
-
-    delete mBufferGroup;
-    mBufferGroup = NULL;
-
-    return OK;
-}
-
-
-sp<MetaData> DummyAudioSource::getFormat() {
-    ALOGV("getFormat");
-
-    sp<MetaData> meta = new MetaData;
-    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
-    meta->setInt32(kKeyChannelCount, mChannelCount);
-    meta->setInt32(kKeySampleRate, mSamplingRate);
-    meta->setInt64(kKeyDuration, mFrameDurationUs);
-    meta->setCString(kKeyDecoderComponent, "DummyAudioSource");
-
-    return meta;
-}
-
-status_t DummyAudioSource::read(
-        MediaBuffer **out, const MediaSource::ReadOptions *options) {
-
-    ALOGV("read: E");
-
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode mode;
-
-    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
-        CHECK(seekTimeUs >= 0);
-        mTimeStampUs = seekTimeUs;
-    }
-
-    {
-        Mutex::Autolock autoLock(mLock);
-        if (mTimeStampUs >= mAudioDurationUs) {
-            ALOGI("read: EOS reached %lld > %lld",
-                mTimeStampUs, mAudioDurationUs);
-
-            *out = NULL;
-            return ERROR_END_OF_STREAM;
-        }
-    }
-
-    MediaBuffer *buffer;
-    status_t err = mBufferGroup->acquire_buffer(&buffer);
-    if (err != OK) {
-        ALOGE("Failed to acquire buffer from mBufferGroup: %d", err);
-        return err;
-    }
-
-    memset((uint8_t *) buffer->data() + buffer->range_offset(),
-            0, mNumberOfSamplePerFrame << 1);
-    buffer->set_range(buffer->range_offset(), (mNumberOfSamplePerFrame << 1));
-    buffer->meta_data()->setInt64(kKeyTime, mTimeStampUs);
-
-    ALOGV("read: offset  = %d, size = %d, mTimeStampUs = %lld",
-             buffer->range_offset(), buffer->size(), mTimeStampUs);
-
-    mTimeStampUs = mTimeStampUs + mFrameDurationUs;
-    *out = buffer;
-
-    return OK;
-}
-
-}// namespace android
diff --git a/libvideoeditor/lvpp/DummyAudioSource.h b/libvideoeditor/lvpp/DummyAudioSource.h
deleted file mode 100755
index 5f25a8c..0000000
--- a/libvideoeditor/lvpp/DummyAudioSource.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DUMMY_AUDIOSOURCE_H_
-#define DUMMY_AUDIOSOURCE_H_
-
-#include <media/stagefright/MediaSource.h>
-
-
-namespace android {
-
-class MetaData;
-struct MediaBufferGroup;
-
-struct DummyAudioSource : public MediaSource {
-
-public:
-    static sp<DummyAudioSource> Create(
-                int32_t samplingRate, int32_t channelCount,
-                int64_t frameDurationUs, int64_t audioDurationUs);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-                MediaBuffer **buffer,
-                const MediaSource::ReadOptions *options = NULL);
-
-    void setDuration(int64_t audioDurationUs);
-
-protected:
-    virtual ~DummyAudioSource();
-
-private:
-    int32_t mSamplingRate;
-    int32_t mChannelCount;
-    int64_t mFrameDurationUs;
-    int32_t mNumberOfSamplePerFrame;
-    int64_t mAudioDurationUs;
-    int64_t mTimeStampUs;
-    Mutex mLock;
-
-    MediaBufferGroup *mBufferGroup;
-
-    DummyAudioSource(
-            int32_t samplingRate, int32_t channelCount,
-            int64_t frameDurationUs, int64_t audioDurationUs);
-
-    // Don't call me
-    DummyAudioSource(const DummyAudioSource &);
-    DummyAudioSource &operator=(const DummyAudioSource &);
-
-};
-
-}//namespace android
-
-
-#endif //DUMMY_AUDIOSOURCE_H_
-
diff --git a/libvideoeditor/lvpp/DummyVideoSource.cpp b/libvideoeditor/lvpp/DummyVideoSource.cpp
deleted file mode 100755
index 6dbcf2a..0000000
--- a/libvideoeditor/lvpp/DummyVideoSource.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "DummyVideoSource"
-#include <inttypes.h>
-#include <stdlib.h>
-#include <utils/Log.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MetaData.h>
-#include "VideoEditorTools.h"
-#include "DummyVideoSource.h"
-
-
-namespace android {
-
-sp<DummyVideoSource> DummyVideoSource::Create(
-        uint32_t width, uint32_t height,
-        uint64_t clipDuration, const char *imageUri) {
-
-    ALOGV("Create");
-    return new DummyVideoSource(
-                    width, height, clipDuration, imageUri);
-
-}
-
-
-DummyVideoSource::DummyVideoSource(
-        uint32_t width, uint32_t height,
-        uint64_t clipDuration, const char *imageUri) {
-
-    ALOGV("Constructor: E");
-
-    mFrameWidth = width;
-    mFrameHeight = height;
-    mImageClipDuration = clipDuration;
-    mUri = imageUri;
-    mImageBuffer = NULL;
-
-    ALOGV("%s", mUri);
-    ALOGV("Constructor: X");
-}
-
-
-DummyVideoSource::~DummyVideoSource() {
-    /* Do nothing here? */
-    ALOGV("~DummyVideoSource");
-}
-
-
-
-status_t DummyVideoSource::start(MetaData *params) {
-    ALOGV("start: E");
-
-    // Get the frame buffer from the rgb file, mUri,
-    // and store its content into a MediaBuffer
-    status_t err = LvGetImageThumbNail(
-                    (const char *)mUri,
-                    mFrameHeight, mFrameWidth,
-                    (M4OSA_Void **) &mImageBuffer);
-    if (err != OK) {
-        ALOGE("LvGetImageThumbNail failed: %d", err);
-        return err;
-    }
-
-    mIsFirstImageFrame = true;
-    mImageSeekTime = 0;
-    mImagePlayStartTime = 0;
-    mFrameTimeUs = 0;
-
-    ALOGV("start: X");
-    return OK;
-}
-
-
-status_t DummyVideoSource::stop() {
-    ALOGV("stop");
-    status_t err = OK;
-
-    if (mImageBuffer != NULL) {
-        free(mImageBuffer);
-        mImageBuffer = NULL;
-    }
-
-    return err;
-}
-
-
-sp<MetaData> DummyVideoSource::getFormat() {
-    ALOGV("getFormat");
-
-    sp<MetaData> meta = new MetaData;
-    meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar);
-    meta->setInt32(kKeyWidth, mFrameWidth);
-    meta->setInt32(kKeyHeight, mFrameHeight);
-    meta->setInt64(kKeyDuration, mImageClipDuration);
-    meta->setCString(kKeyDecoderComponent, "DummyVideoSource");
-
-    return meta;
-}
-
-status_t DummyVideoSource::read(
-        MediaBuffer **out,
-        const MediaSource::ReadOptions *options) {
-
-    ALOGV("read: E");
-
-    const int32_t kTimeScale = 1000;  /* time scale in ms */
-    bool seeking = false;
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode seekMode;
-    if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
-        seeking = true;
-        mImageSeekTime = seekTimeUs;
-        M4OSA_clockGetTime(&mImagePlayStartTime, kTimeScale);
-    }
-
-    if ((mImageSeekTime == mImageClipDuration) ||
-        (mFrameTimeUs == (int64_t)mImageClipDuration)) {
-        ALOGV("read: EOS reached");
-        *out = NULL;
-        return ERROR_END_OF_STREAM;
-    }
-
-    status_t err = OK;
-    MediaBuffer *buffer = new MediaBuffer(
-            mImageBuffer, (mFrameWidth * mFrameHeight * 1.5));
-
-    // Set timestamp of buffer
-    if (mIsFirstImageFrame) {
-        M4OSA_clockGetTime(&mImagePlayStartTime, kTimeScale);
-        mFrameTimeUs =  (mImageSeekTime + 1);
-        ALOGV("read: jpg 1st frame timeUs = %lld, begin cut time = %" PRIu32,
-            mFrameTimeUs, mImageSeekTime);
-
-        mIsFirstImageFrame = false;
-    } else {
-        M4OSA_Time  currentTimeMs;
-        M4OSA_clockGetTime(&currentTimeMs, kTimeScale);
-
-        mFrameTimeUs = mImageSeekTime +
-            (currentTimeMs - mImagePlayStartTime) * 1000LL;
-
-        ALOGV("read: jpg frame timeUs = %lld", mFrameTimeUs);
-    }
-
-    buffer->meta_data()->setInt64(kKeyTime, mFrameTimeUs);
-    buffer->set_range(buffer->range_offset(),
-                mFrameWidth * mFrameHeight * 1.5);
-
-    *out = buffer;
-    return err;
-}
-
-}// namespace android
diff --git a/libvideoeditor/lvpp/DummyVideoSource.h b/libvideoeditor/lvpp/DummyVideoSource.h
deleted file mode 100755
index 16514f2..0000000
--- a/libvideoeditor/lvpp/DummyVideoSource.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DUMMY_VIDEOSOURCE_H_
-#define DUMMY_VIDEOSOURCE_H_
-
-#include <media/stagefright/MediaSource.h>
-#include "M4OSA_Clock.h"
-#include "M4OSA_Time.h"
-#include "M4OSA_Types.h"
-
-namespace android {
-
-class  MediaBuffer;
-class  MetaData;
-
-struct DummyVideoSource : public MediaSource {
-
-public:
-    static sp<DummyVideoSource> Create(
-                uint32_t width, uint32_t height,
-                uint64_t clipDuration, const char *imageUri);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-                MediaBuffer **buffer,
-                const MediaSource::ReadOptions *options = NULL);
-
-protected:
-    virtual ~DummyVideoSource();
-
-private:
-    uint32_t mFrameWidth;
-    uint32_t mFrameHeight;
-    uint64_t mImageClipDuration;
-    const char *mUri;
-    int64_t mFrameTimeUs;
-    bool mIsFirstImageFrame;
-    void *mImageBuffer;
-    M4OSA_Time mImagePlayStartTime;
-    uint32_t mImageSeekTime;
-
-    DummyVideoSource(
-            uint32_t width, uint32_t height,
-            uint64_t clipDuration, const char *imageUri);
-
-    // Don't call me
-    DummyVideoSource(const DummyVideoSource &);
-    DummyVideoSource &operator=(const DummyVideoSource &);
-
-};
-
-
-}//namespace android
-
-
-#endif //DUMMY_VIDEOSOURCE_H_
-
diff --git a/libvideoeditor/lvpp/I420ColorConverter.cpp b/libvideoeditor/lvpp/I420ColorConverter.cpp
deleted file mode 100755
index 321d3fe..0000000
--- a/libvideoeditor/lvpp/I420ColorConverter.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <I420ColorConverter.h>
-#include <cutils/log.h>
-#include <dlfcn.h>
-
-I420ColorConverter::I420ColorConverter() {
-    // Open the shared library
-    mHandle = dlopen("libI420colorconvert.so", RTLD_NOW);
-
-    if (mHandle == NULL) {
-        ALOGW("I420ColorConverter: cannot load libI420colorconvert.so");
-        return;
-    }
-
-    // Find the entry point
-    void (*getI420ColorConverter)(I420ColorConverter *converter) =
-        (void (*)(I420ColorConverter*)) dlsym(mHandle, "getI420ColorConverter");
-
-    if (getI420ColorConverter == NULL) {
-        ALOGW("I420ColorConverter: cannot load getI420ColorConverter");
-        dlclose(mHandle);
-        mHandle = NULL;
-        return;
-    }
-
-    // Fill the function pointers.
-    getI420ColorConverter(this);
-
-    ALOGI("I420ColorConverter: libI420colorconvert.so loaded");
-}
-
-bool I420ColorConverter::isLoaded() {
-    return mHandle != NULL;
-}
-
-I420ColorConverter::~I420ColorConverter() {
-    if (mHandle) {
-        dlclose(mHandle);
-    }
-}
diff --git a/libvideoeditor/lvpp/I420ColorConverter.h b/libvideoeditor/lvpp/I420ColorConverter.h
deleted file mode 100755
index 8d48e44..0000000
--- a/libvideoeditor/lvpp/I420ColorConverter.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef I420_COLOR_CONVERTER_H
-#define I420_COLOR_CONVERTER_H
-
-#include <II420ColorConverter.h>
-
-// This is a wrapper around the I420 color converter functions in
-// II420ColorConverter, which is loaded from a shared library.
-class I420ColorConverter: public II420ColorConverter {
-public:
-    I420ColorConverter();
-    ~I420ColorConverter();
-
-    // Returns true if the converter functions are successfully loaded.
-    bool isLoaded();
-private:
-    void* mHandle;
-};
-
-#endif /* I420_COLOR_CONVERTER_H */
diff --git a/libvideoeditor/lvpp/MODULE_LICENSE_APACHE2 b/libvideoeditor/lvpp/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/lvpp/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/lvpp/NOTICE b/libvideoeditor/lvpp/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/lvpp/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
deleted file mode 100755
index 8b362ef..0000000
--- a/libvideoeditor/lvpp/NativeWindowRenderer.cpp
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "NativeWindowRenderer"
-#include "NativeWindowRenderer.h"
-
-#include <GLES2/gl2.h>
-#include <GLES2/gl2ext.h>
-#include <cutils/log.h>
-#include <gui/GLConsumer.h>
-#include <gui/Surface.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include "VideoEditorTools.h"
-
-#define CHECK_EGL_ERROR CHECK(EGL_SUCCESS == eglGetError())
-#define CHECK_GL_ERROR CHECK(GLenum(GL_NO_ERROR) == glGetError())
-
-//
-// Vertex and fragment programs
-//
-
-// The matrix is derived from
-// frameworks/base/media/libstagefright/colorconversion/ColorConverter.cpp
-//
-// R * 255 = 1.164 * (Y - 16) + 1.596 * (V - 128)
-// G * 255 = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
-// B * 255 = 1.164 * (Y - 16) + 2.018 * (U - 128)
-//
-// Here we assume YUV are in the range of [0,255], RGB are in the range of
-// [0, 1]
-#define RGB2YUV_MATRIX \
-"const mat4 rgb2yuv = mat4("\
-"    65.52255,   -37.79398,   111.98732,     0.00000,"\
-"   128.62729,   -74.19334,   -93.81088,     0.00000,"\
-"    24.92233,   111.98732,   -18.17644,     0.00000,"\
-"    16.00000,   128.00000,   128.00000,     1.00000);\n"
-
-#define YUV2RGB_MATRIX \
-"const mat4 yuv2rgb = mat4("\
-"   0.00456,   0.00456,   0.00456,   0.00000,"\
-"   0.00000,  -0.00153,   0.00791,   0.00000,"\
-"   0.00626,  -0.00319,   0.00000,   0.00000,"\
-"  -0.87416,   0.53133,  -1.08599,   1.00000);\n"
-
-static const char vSrcNormal[] =
-    "attribute vec4 vPosition;\n"
-    "attribute vec2 vTexPos;\n"
-    "uniform mat4 texMatrix;\n"
-    "varying vec2 texCoords;\n"
-    "varying float topDown;\n"
-    "void main() {\n"
-    "  gl_Position = vPosition;\n"
-    "  texCoords = (texMatrix * vec4(vTexPos, 0.0, 1.0)).xy;\n"
-    "  topDown = vTexPos.y;\n"
-    "}\n";
-
-static const char fSrcNormal[] =
-    "#extension GL_OES_EGL_image_external : require\n"
-    "precision mediump float;\n"
-    "uniform samplerExternalOES texSampler;\n"
-    "varying vec2 texCoords;\n"
-    "void main() {\n"
-    "  gl_FragColor = texture2D(texSampler, texCoords);\n"
-    "}\n";
-
-static const char fSrcSepia[] =
-    "#extension GL_OES_EGL_image_external : require\n"
-    "precision mediump float;\n"
-    "uniform samplerExternalOES texSampler;\n"
-    "varying vec2 texCoords;\n"
-    RGB2YUV_MATRIX
-    YUV2RGB_MATRIX
-    "void main() {\n"
-    "  vec4 rgb = texture2D(texSampler, texCoords);\n"
-    "  vec4 yuv = rgb2yuv * rgb;\n"
-    "  yuv = vec4(yuv.x, 117.0, 139.0, 1.0);\n"
-    "  gl_FragColor = yuv2rgb * yuv;\n"
-    "}\n";
-
-static const char fSrcNegative[] =
-    "#extension GL_OES_EGL_image_external : require\n"
-    "precision mediump float;\n"
-    "uniform samplerExternalOES texSampler;\n"
-    "varying vec2 texCoords;\n"
-    RGB2YUV_MATRIX
-    YUV2RGB_MATRIX
-    "void main() {\n"
-    "  vec4 rgb = texture2D(texSampler, texCoords);\n"
-    "  vec4 yuv = rgb2yuv * rgb;\n"
-    "  yuv = vec4(255.0 - yuv.x, yuv.y, yuv.z, 1.0);\n"
-    "  gl_FragColor = yuv2rgb * yuv;\n"
-    "}\n";
-
-static const char fSrcGradient[] =
-    "#extension GL_OES_EGL_image_external : require\n"
-    "precision mediump float;\n"
-    "uniform samplerExternalOES texSampler;\n"
-    "varying vec2 texCoords;\n"
-    "varying float topDown;\n"
-    RGB2YUV_MATRIX
-    YUV2RGB_MATRIX
-    "void main() {\n"
-    "  vec4 rgb = texture2D(texSampler, texCoords);\n"
-    "  vec4 yuv = rgb2yuv * rgb;\n"
-    "  vec4 mixin = vec4(15.0/31.0, 59.0/63.0, 31.0/31.0, 1.0);\n"
-    "  vec4 yuv2 = rgb2yuv * vec4((mixin.xyz * topDown), 1);\n"
-    "  yuv = vec4(yuv.x, yuv2.y, yuv2.z, 1);\n"
-    "  gl_FragColor = yuv2rgb * yuv;\n"
-    "}\n";
-
-namespace android {
-
-NativeWindowRenderer::NativeWindowRenderer(sp<ANativeWindow> nativeWindow,
-        int width, int height)
-    : mNativeWindow(nativeWindow)
-    , mDstWidth(width)
-    , mDstHeight(height)
-    , mLastVideoEffect(-1)
-    , mNextTextureId(100)
-    , mActiveInputs(0)
-    , mThreadCmd(CMD_IDLE) {
-    createThread(threadStart, this);
-}
-
-// The functions below run in the GL thread.
-//
-// All GL-related work is done in this thread, and other threads send
-// requests to this thread using a command code. We expect most of the
-// time there will only be one thread sending in requests, so we let
-// other threads wait until the request is finished by GL thread.
-
-int NativeWindowRenderer::threadStart(void* self) {
-    ALOGD("create thread");
-    ((NativeWindowRenderer*)self)->glThread();
-    return 0;
-}
-
-void NativeWindowRenderer::glThread() {
-    initializeEGL();
-    createPrograms();
-
-    Mutex::Autolock autoLock(mLock);
-    bool quit = false;
-    while (!quit) {
-        switch (mThreadCmd) {
-            case CMD_IDLE:
-                mCond.wait(mLock);
-                continue;
-            case CMD_RENDER_INPUT:
-                render(mThreadRenderInput);
-                break;
-            case CMD_RESERVE_TEXTURE:
-                glBindTexture(GL_TEXTURE_EXTERNAL_OES, mThreadTextureId);
-                CHECK_GL_ERROR;
-                break;
-            case CMD_DELETE_TEXTURE:
-                glDeleteTextures(1, &mThreadTextureId);
-                break;
-            case CMD_QUIT:
-                terminateEGL();
-                quit = true;
-                break;
-        }
-        // Tell the requester that the command is finished.
-        mThreadCmd = CMD_IDLE;
-        mCond.broadcast();
-    }
-    ALOGD("quit");
-}
-
-void NativeWindowRenderer::initializeEGL() {
-    mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
-    CHECK_EGL_ERROR;
-
-    EGLint majorVersion;
-    EGLint minorVersion;
-    eglInitialize(mEglDisplay, &majorVersion, &minorVersion);
-    CHECK_EGL_ERROR;
-
-    EGLConfig config;
-    EGLint numConfigs = -1;
-    EGLint configAttribs[] = {
-        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
-        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
-        EGL_RED_SIZE, 8,
-        EGL_GREEN_SIZE, 8,
-        EGL_BLUE_SIZE, 8,
-        EGL_NONE
-    };
-    eglChooseConfig(mEglDisplay, configAttribs, &config, 1, &numConfigs);
-    CHECK_EGL_ERROR;
-
-    mEglSurface = eglCreateWindowSurface(mEglDisplay, config,
-        mNativeWindow.get(), NULL);
-    CHECK_EGL_ERROR;
-
-    EGLint contextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
-    mEglContext = eglCreateContext(mEglDisplay, config, EGL_NO_CONTEXT,
-        contextAttribs);
-    CHECK_EGL_ERROR;
-
-    eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, mEglContext);
-    CHECK_EGL_ERROR;
-}
-
-void NativeWindowRenderer::terminateEGL() {
-    eglDestroyContext(mEglDisplay, mEglContext);
-    eglDestroySurface(mEglDisplay, mEglSurface);
-    eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
-    eglTerminate(mEglDisplay);
-}
-
-void NativeWindowRenderer::createPrograms() {
-    GLuint vShader;
-    loadShader(GL_VERTEX_SHADER, vSrcNormal, &vShader);
-
-    const char* fSrc[NUMBER_OF_EFFECTS] = {
-        fSrcNormal, fSrcSepia, fSrcNegative, fSrcGradient
-    };
-
-    for (int i = 0; i < NUMBER_OF_EFFECTS; i++) {
-        GLuint fShader;
-        loadShader(GL_FRAGMENT_SHADER, fSrc[i], &fShader);
-        createProgram(vShader, fShader, &mProgram[i]);
-        glDeleteShader(fShader);
-        CHECK_GL_ERROR;
-    }
-
-    glDeleteShader(vShader);
-    CHECK_GL_ERROR;
-}
-
-void NativeWindowRenderer::createProgram(
-    GLuint vertexShader, GLuint fragmentShader, GLuint* outPgm) {
-
-    GLuint program = glCreateProgram();
-    CHECK_GL_ERROR;
-
-    glAttachShader(program, vertexShader);
-    CHECK_GL_ERROR;
-
-    glAttachShader(program, fragmentShader);
-    CHECK_GL_ERROR;
-
-    glLinkProgram(program);
-    CHECK_GL_ERROR;
-
-    GLint linkStatus = GL_FALSE;
-    glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
-    if (linkStatus != GL_TRUE) {
-        GLint infoLen = 0;
-        glGetProgramiv(program, GL_INFO_LOG_LENGTH, &infoLen);
-        if (infoLen) {
-            char* buf = (char*) malloc(infoLen);
-            if (buf) {
-                glGetProgramInfoLog(program, infoLen, NULL, buf);
-                ALOGE("Program link log:\n%s\n", buf);
-                free(buf);
-            }
-        }
-        glDeleteProgram(program);
-        program = 0;
-    }
-
-    *outPgm = program;
-}
-
-void NativeWindowRenderer::loadShader(GLenum shaderType, const char* pSource,
-        GLuint* outShader) {
-    GLuint shader = glCreateShader(shaderType);
-    CHECK_GL_ERROR;
-
-    glShaderSource(shader, 1, &pSource, NULL);
-    CHECK_GL_ERROR;
-
-    glCompileShader(shader);
-    CHECK_GL_ERROR;
-
-    GLint compiled = 0;
-    glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
-    if (!compiled) {
-        GLint infoLen = 0;
-        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
-        char* buf = (char*) malloc(infoLen);
-        if (buf) {
-            glGetShaderInfoLog(shader, infoLen, NULL, buf);
-            ALOGE("Shader compile log:\n%s\n", buf);
-            free(buf);
-        }
-        glDeleteShader(shader);
-        shader = 0;
-    }
-    *outShader = shader;
-}
-
-NativeWindowRenderer::~NativeWindowRenderer() {
-    CHECK(mActiveInputs == 0);
-    startRequest(CMD_QUIT);
-    sendRequest();
-}
-
-void NativeWindowRenderer::render(RenderInput* input) {
-    sp<GLConsumer> ST = input->mST;
-    sp<Surface> STC = input->mSTC;
-
-    if (input->mIsExternalBuffer) {
-        queueExternalBuffer(STC.get(), input->mBuffer,
-            input->mWidth, input->mHeight);
-    } else {
-        queueInternalBuffer(STC.get(), input->mBuffer);
-    }
-
-    ST->updateTexImage();
-    glClearColor(0, 0, 0, 0);
-    glClear(GL_COLOR_BUFFER_BIT);
-
-    calculatePositionCoordinates(input->mRenderingMode,
-        input->mWidth, input->mHeight);
-
-    const GLfloat textureCoordinates[] = {
-         0.0f,  1.0f,
-         0.0f,  0.0f,
-         1.0f,  0.0f,
-         1.0f,  1.0f,
-    };
-
-    updateProgramAndHandle(input->mVideoEffect);
-
-    glVertexAttribPointer(mPositionHandle, 2, GL_FLOAT, GL_FALSE, 0,
-        mPositionCoordinates);
-    CHECK_GL_ERROR;
-
-    glEnableVertexAttribArray(mPositionHandle);
-    CHECK_GL_ERROR;
-
-    glVertexAttribPointer(mTexPosHandle, 2, GL_FLOAT, GL_FALSE, 0,
-        textureCoordinates);
-    CHECK_GL_ERROR;
-
-    glEnableVertexAttribArray(mTexPosHandle);
-    CHECK_GL_ERROR;
-
-    GLfloat texMatrix[16];
-    ST->getTransformMatrix(texMatrix);
-    glUniformMatrix4fv(mTexMatrixHandle, 1, GL_FALSE, texMatrix);
-    CHECK_GL_ERROR;
-
-    glBindTexture(GL_TEXTURE_EXTERNAL_OES, input->mTextureId);
-    CHECK_GL_ERROR;
-
-    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
-    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-    glTexParameteri(
-        GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
-    glTexParameteri(
-        GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-    CHECK_GL_ERROR;
-
-    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
-    CHECK_GL_ERROR;
-
-    eglSwapBuffers(mEglDisplay, mEglSurface);
-}
-
-void NativeWindowRenderer::queueInternalBuffer(ANativeWindow *anw,
-    MediaBuffer* buffer) {
-    int64_t timeUs;
-    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
-    native_window_set_buffers_timestamp(anw, timeUs * 1000);
-    status_t err = anw->queueBuffer(anw, buffer->graphicBuffer().get(), -1);
-    if (err != 0) {
-        ALOGE("queueBuffer failed with error %s (%d)", strerror(-err), -err);
-        return;
-    }
-
-    sp<MetaData> metaData = buffer->meta_data();
-    metaData->setInt32(kKeyRendered, 1);
-}
-
-void NativeWindowRenderer::queueExternalBuffer(ANativeWindow* anw,
-    MediaBuffer* buffer, int width, int height) {
-    native_window_set_buffers_geometry(anw, width, height,
-            HAL_PIXEL_FORMAT_YV12);
-    native_window_set_usage(anw, GRALLOC_USAGE_SW_WRITE_OFTEN);
-
-    ANativeWindowBuffer* anb;
-    CHECK(NO_ERROR == native_window_dequeue_buffer_and_wait(anw, &anb));
-    CHECK(anb != NULL);
-
-    // Copy the buffer
-    uint8_t* img = NULL;
-    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
-    buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
-    copyI420Buffer(buffer, img, width, height, buf->getStride());
-    buf->unlock();
-    CHECK(NO_ERROR == anw->queueBuffer(anw, buf->getNativeBuffer(), -1));
-}
-
-void NativeWindowRenderer::copyI420Buffer(MediaBuffer* src, uint8_t* dst,
-        int srcWidth, int srcHeight, int stride) {
-    int strideUV = (stride / 2 + 0xf) & ~0xf;
-    uint8_t* p = (uint8_t*)src->data() + src->range_offset();
-    // Y
-    for (int i = srcHeight; i > 0; i--) {
-        memcpy(dst, p, srcWidth);
-        dst += stride;
-        p += srcWidth;
-    }
-    // The src is I420, the dst is YV12.
-    // U
-    p += srcWidth * srcHeight / 4;
-    for (int i = srcHeight / 2; i > 0; i--) {
-        memcpy(dst, p, srcWidth / 2);
-        dst += strideUV;
-        p += srcWidth / 2;
-    }
-    // V
-    p -= srcWidth * srcHeight / 2;
-    for (int i = srcHeight / 2; i > 0; i--) {
-        memcpy(dst, p, srcWidth / 2);
-        dst += strideUV;
-        p += srcWidth / 2;
-    }
-}
-
-void NativeWindowRenderer::updateProgramAndHandle(uint32_t videoEffect) {
-    if (mLastVideoEffect == videoEffect) {
-        return;
-    }
-
-    mLastVideoEffect = videoEffect;
-    int i;
-    switch (mLastVideoEffect) {
-        case VIDEO_EFFECT_NONE:
-            i = 0;
-            break;
-        case VIDEO_EFFECT_SEPIA:
-            i = 1;
-            break;
-        case VIDEO_EFFECT_NEGATIVE:
-            i = 2;
-            break;
-        case VIDEO_EFFECT_GRADIENT:
-            i = 3;
-            break;
-        default:
-            i = 0;
-            break;
-    }
-    glUseProgram(mProgram[i]);
-    CHECK_GL_ERROR;
-
-    mPositionHandle = glGetAttribLocation(mProgram[i], "vPosition");
-    mTexPosHandle = glGetAttribLocation(mProgram[i], "vTexPos");
-    mTexMatrixHandle = glGetUniformLocation(mProgram[i], "texMatrix");
-    CHECK_GL_ERROR;
-}
-
-void NativeWindowRenderer::calculatePositionCoordinates(
-        M4xVSS_MediaRendering renderingMode, int srcWidth, int srcHeight) {
-    float x, y;
-    switch (renderingMode) {
-        case M4xVSS_kResizing:
-        default:
-            x = 1;
-            y = 1;
-            break;
-        case M4xVSS_kCropping:
-            x = float(srcWidth) / mDstWidth;
-            y = float(srcHeight) / mDstHeight;
-            // Make the smaller side 1
-            if (x > y) {
-                x /= y;
-                y = 1;
-            } else {
-                y /= x;
-                x = 1;
-            }
-            break;
-        case M4xVSS_kBlackBorders:
-            x = float(srcWidth) / mDstWidth;
-            y = float(srcHeight) / mDstHeight;
-            // Make the larger side 1
-            if (x > y) {
-                y /= x;
-                x = 1;
-            } else {
-                x /= y;
-                y = 1;
-            }
-            break;
-    }
-
-    mPositionCoordinates[0] = -x;
-    mPositionCoordinates[1] = y;
-    mPositionCoordinates[2] = -x;
-    mPositionCoordinates[3] = -y;
-    mPositionCoordinates[4] = x;
-    mPositionCoordinates[5] = -y;
-    mPositionCoordinates[6] = x;
-    mPositionCoordinates[7] = y;
-}
-
-//
-//  The functions below run in other threads.
-//
-
-void NativeWindowRenderer::startRequest(int cmd) {
-    mLock.lock();
-    while (mThreadCmd != CMD_IDLE) {
-        mCond.wait(mLock);
-    }
-    mThreadCmd = cmd;
-}
-
-void NativeWindowRenderer::sendRequest() {
-    mCond.broadcast();
-    while (mThreadCmd != CMD_IDLE) {
-        mCond.wait(mLock);
-    }
-    mLock.unlock();
-}
-
-RenderInput* NativeWindowRenderer::createRenderInput() {
-    ALOGD("new render input %d", mNextTextureId);
-    RenderInput* input = new RenderInput(this, mNextTextureId);
-
-    startRequest(CMD_RESERVE_TEXTURE);
-    mThreadTextureId = mNextTextureId;
-    sendRequest();
-
-    mNextTextureId++;
-    mActiveInputs++;
-    return input;
-}
-
-void NativeWindowRenderer::destroyRenderInput(RenderInput* input) {
-    ALOGD("destroy render input %d", input->mTextureId);
-    GLuint textureId = input->mTextureId;
-    delete input;
-
-    startRequest(CMD_DELETE_TEXTURE);
-    mThreadTextureId = textureId;
-    sendRequest();
-
-    mActiveInputs--;
-}
-
-//
-//  RenderInput
-//
-
-RenderInput::RenderInput(NativeWindowRenderer* renderer, GLuint textureId)
-    : mRenderer(renderer)
-    , mTextureId(textureId) {
-    sp<BufferQueue> bq = new BufferQueue();
-    mST = new GLConsumer(bq, mTextureId);
-    mSTC = new Surface(bq);
-    native_window_connect(mSTC.get(), NATIVE_WINDOW_API_MEDIA);
-}
-
-RenderInput::~RenderInput() {
-}
-
-ANativeWindow* RenderInput::getTargetWindow() {
-    return mSTC.get();
-}
-
-void RenderInput::updateVideoSize(sp<MetaData> meta) {
-    CHECK(meta->findInt32(kKeyWidth, &mWidth));
-    CHECK(meta->findInt32(kKeyHeight, &mHeight));
-
-    int left, top, right, bottom;
-    if (meta->findRect(kKeyCropRect, &left, &top, &right, &bottom)) {
-        mWidth = right - left + 1;
-        mHeight = bottom - top + 1;
-    }
-
-    // If rotation degrees is 90 or 270, swap width and height
-    // (mWidth and mHeight are the _rotated_ source rectangle).
-    int32_t rotationDegrees;
-    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    if (rotationDegrees == 90 || rotationDegrees == 270) {
-        int tmp = mWidth;
-        mWidth = mHeight;
-        mHeight = tmp;
-    }
-}
-
-void RenderInput::render(MediaBuffer* buffer, uint32_t videoEffect,
-        M4xVSS_MediaRendering renderingMode, bool isExternalBuffer) {
-    mVideoEffect = videoEffect;
-    mRenderingMode = renderingMode;
-    mIsExternalBuffer = isExternalBuffer;
-    mBuffer = buffer;
-
-    mRenderer->startRequest(NativeWindowRenderer::CMD_RENDER_INPUT);
-    mRenderer->mThreadRenderInput = this;
-    mRenderer->sendRequest();
-}
-
-}  // namespace android
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.h b/libvideoeditor/lvpp/NativeWindowRenderer.h
deleted file mode 100755
index 26b4cba..0000000
--- a/libvideoeditor/lvpp/NativeWindowRenderer.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NATIVE_WINDOW_RENDERER_H_
-#define NATIVE_WINDOW_RENDERER_H_
-
-#include <EGL/egl.h>
-#include <GLES2/gl2.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-
-#include "M4xVSS_API.h"
-
-// The NativeWindowRenderer draws video frames stored in MediaBuffers to
-// an ANativeWindow.  It can apply "rendering mode" and color effects to
-// the frames. "Rendering mode" is the option to do resizing, cropping,
-// or black-bordering when the source and destination aspect ratio are
-// different. Color effects include sepia, negative, and gradient.
-//
-// The input to NativeWindowRenderer is provided by the RenderInput class,
-// and there can be multiple active RenderInput at the same time. Although
-// we only expect that happens briefly when one clip is about to finish
-// and the next clip is about to start.
-//
-// We allocate a Surface for each RenderInput and the user can use
-// the getTargetWindow() function to get the corresponding ANativeWindow
-// for that Surface. The intention is that the user can pass that
-// ANativeWindow to OMXCodec::Create() so the codec can decode directly
-// to buffers provided by the texture.
-
-namespace android {
-
-class GLConsumer;
-class Surface;
-class RenderInput;
-
-class NativeWindowRenderer {
-public:
-    NativeWindowRenderer(sp<ANativeWindow> nativeWindow, int width, int height);
-    ~NativeWindowRenderer();
-
-    RenderInput* createRenderInput();
-    void destroyRenderInput(RenderInput* input);
-
-private:
-    // No copy constructor and assignment
-    NativeWindowRenderer(const NativeWindowRenderer &);
-    NativeWindowRenderer &operator=(const NativeWindowRenderer &);
-
-    // Initialization and finialization
-    void initializeEGL();
-    void terminateEGL();
-    void createPrograms();
-    void createProgram(
-            GLuint vertexShader, GLuint fragmentShader, GLuint* outPgm);
-    void loadShader(
-            GLenum shaderType, const char* pSource, GLuint* outShader);
-
-    // These functions are executed every frame.
-    void render(RenderInput* input);
-    void queueInternalBuffer(ANativeWindow* anw, MediaBuffer* buffer);
-    void queueExternalBuffer(ANativeWindow* anw, MediaBuffer* buffer,
-            int width, int height);
-    void copyI420Buffer(MediaBuffer* src, uint8_t* dst,
-            int srcWidth, int srcHeight, int stride);
-    void updateProgramAndHandle(uint32_t videoEffect);
-    void calculatePositionCoordinates(M4xVSS_MediaRendering renderingMode,
-            int srcWidth, int srcHeight);
-
-    // These variables are initialized once and doesn't change afterwards.
-    sp<ANativeWindow> mNativeWindow;
-    int mDstWidth, mDstHeight;
-    EGLDisplay mEglDisplay;
-    EGLSurface mEglSurface;
-    EGLContext mEglContext;
-    enum {
-        EFFECT_NORMAL,
-        EFFECT_SEPIA,
-        EFFECT_NEGATIVE,
-        EFFECT_GRADIENT,
-        NUMBER_OF_EFFECTS
-    };
-    GLuint mProgram[NUMBER_OF_EFFECTS];
-
-    // We use one shader program for each effect. mLastVideoEffect remembers
-    // the program used for the last frame. when the effect used changes,
-    // we change the program used and update the handles.
-    uint32_t mLastVideoEffect;
-    GLint mPositionHandle;
-    GLint mTexPosHandle;
-    GLint mTexMatrixHandle;
-
-    // This is the vertex coordinates used for the frame texture.
-    // It's calculated according the the rendering mode and the source and
-    // destination aspect ratio.
-    GLfloat mPositionCoordinates[8];
-
-    // We use a different GL id for each Surface.
-    GLuint mNextTextureId;
-
-    // Number of existing RenderInputs, just for debugging.
-    int mActiveInputs;
-
-    // The GL thread functions
-    static int threadStart(void* self);
-    void glThread();
-
-    // These variables are used to communicate between the GL thread and
-    // other threads.
-    Mutex mLock;
-    Condition mCond;
-    enum {
-        CMD_IDLE,
-        CMD_RENDER_INPUT,
-        CMD_RESERVE_TEXTURE,
-        CMD_DELETE_TEXTURE,
-        CMD_QUIT,
-    };
-    int mThreadCmd;
-    RenderInput* mThreadRenderInput;
-    GLuint mThreadTextureId;
-
-    // These functions are used to send commands to the GL thread.
-    // sendRequest() also waits for the GL thread acknowledges the
-    // command is finished.
-    void startRequest(int cmd);
-    void sendRequest();
-
-    friend class RenderInput;
-};
-
-class RenderInput {
-public:
-    // Returns the ANativeWindow corresponds to the Surface.
-    ANativeWindow* getTargetWindow();
-
-    // Updates video frame size from the MediaSource's metadata. Specifically
-    // we look for kKeyWidth, kKeyHeight, and (optionally) kKeyCropRect.
-    void updateVideoSize(sp<MetaData> meta);
-
-    // Renders the buffer with the given video effect and rending mode.
-    // The video effets are defined in VideoEditorTools.h
-    // Set isExternalBuffer to true only when the buffer given is not
-    // provided by the Surface.
-    void render(MediaBuffer *buffer, uint32_t videoEffect,
-        M4xVSS_MediaRendering renderingMode, bool isExternalBuffer);
-private:
-    RenderInput(NativeWindowRenderer* renderer, GLuint textureId);
-    ~RenderInput();
-    NativeWindowRenderer* mRenderer;
-    GLuint mTextureId;
-    sp<GLConsumer> mST;
-    sp<Surface> mSTC;
-    int mWidth, mHeight;
-
-    // These are only valid during render() calls
-    uint32_t mVideoEffect;
-    M4xVSS_MediaRendering mRenderingMode;
-    bool mIsExternalBuffer;
-    MediaBuffer* mBuffer;
-
-    friend class NativeWindowRenderer;
-};
-
-}  // namespace android
-
-#endif  // NATIVE_WINDOW_RENDERER_H_
diff --git a/libvideoeditor/lvpp/PreviewPlayer.cpp b/libvideoeditor/lvpp/PreviewPlayer.cpp
deleted file mode 100755
index 2bd9f84..0000000
--- a/libvideoeditor/lvpp/PreviewPlayer.cpp
+++ /dev/null
@@ -1,2082 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "PreviewPlayer"
-#include <utils/Log.h>
-
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <media/IMediaPlayerService.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <gui/Surface.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-
-#include "VideoEditorPreviewController.h"
-#include "DummyAudioSource.h"
-#include "DummyVideoSource.h"
-#include "VideoEditorSRC.h"
-#include "PreviewPlayer.h"
-
-namespace android {
-
-
-void addBatteryData(uint32_t params) {
-    sp<IBinder> binder =
-        defaultServiceManager()->getService(String16("media.player"));
-    sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
-    CHECK(service.get() != NULL);
-
-    service->addBatteryData(params);
-}
-
-struct PreviewPlayerEvent : public TimedEventQueue::Event {
-    PreviewPlayerEvent(
-            PreviewPlayer *player,
-            void (PreviewPlayer::*method)())
-        : mPlayer(player),
-          mMethod(method) {
-    }
-
-protected:
-    virtual ~PreviewPlayerEvent() {}
-
-    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
-        (mPlayer->*mMethod)();
-    }
-
-private:
-    PreviewPlayer *mPlayer;
-    void (PreviewPlayer::*mMethod)();
-
-    PreviewPlayerEvent(const PreviewPlayerEvent &);
-    PreviewPlayerEvent &operator=(const PreviewPlayerEvent &);
-};
-
-PreviewPlayer::PreviewPlayer(NativeWindowRenderer* renderer)
-    : mQueueStarted(false),
-      mTimeSource(NULL),
-      mVideoRendererIsPreview(false),
-      mAudioPlayer(NULL),
-      mDisplayWidth(0),
-      mDisplayHeight(0),
-      mFlags(0),
-      mExtractorFlags(0),
-      mVideoBuffer(NULL),
-      mLastVideoTimeUs(-1),
-      mNativeWindowRenderer(renderer),
-      mCurrFramingEffectIndex(0),
-      mFrameRGBBuffer(NULL),
-      mFrameYUVBuffer(NULL) {
-
-    CHECK_EQ(mClient.connect(), (status_t)OK);
-    DataSource::RegisterDefaultSniffers();
-
-
-    mVideoRenderer = NULL;
-    mEffectsSettings = NULL;
-    mAudioPlayer = NULL;
-    mAudioMixStoryBoardTS = 0;
-    mCurrentMediaBeginCutTime = 0;
-    mCurrentMediaVolumeValue = 0;
-    mNumberEffects = 0;
-    mDecodedVideoTs = 0;
-    mDecVideoTsStoryBoard = 0;
-    mCurrentVideoEffect = VIDEO_EFFECT_NONE;
-    mProgressCbInterval = 0;
-    mNumberDecVideoFrames = 0;
-    mOverlayUpdateEventPosted = false;
-    mIsChangeSourceRequired = true;
-
-    mVideoEvent = new PreviewPlayerEvent(this, &PreviewPlayer::onVideoEvent);
-    mVideoEventPending = false;
-    mVideoLagEvent = new PreviewPlayerEvent(this, &PreviewPlayer::onVideoLagUpdate);
-    mVideoEventPending = false;
-    mCheckAudioStatusEvent = new PreviewPlayerEvent(
-            this, &PreviewPlayer::onCheckAudioStatus);
-    mAudioStatusEventPending = false;
-    mStreamDoneEvent = new PreviewPlayerEvent(
-            this, &PreviewPlayer::onStreamDone);
-    mStreamDoneEventPending = false;
-    mProgressCbEvent = new PreviewPlayerEvent(this,
-         &PreviewPlayer::onProgressCbEvent);
-
-    mOverlayUpdateEvent = new PreviewPlayerEvent(this,
-        &PreviewPlayer::onUpdateOverlayEvent);
-    mProgressCbEventPending = false;
-
-    mOverlayUpdateEventPending = false;
-    mRenderingMode = (M4xVSS_MediaRendering)MEDIA_RENDERING_INVALID;
-    mIsFiftiesEffectStarted = false;
-    reset();
-}
-
-PreviewPlayer::~PreviewPlayer() {
-
-    if (mQueueStarted) {
-        mQueue.stop();
-    }
-
-    reset();
-
-    if (mVideoRenderer) {
-        mNativeWindowRenderer->destroyRenderInput(mVideoRenderer);
-    }
-
-    Mutex::Autolock lock(mLock);
-    clear_l();
-    mClient.disconnect();
-}
-
-void PreviewPlayer::cancelPlayerEvents_l(bool updateProgressCb) {
-    mQueue.cancelEvent(mVideoEvent->eventID());
-    mVideoEventPending = false;
-    mQueue.cancelEvent(mStreamDoneEvent->eventID());
-    mStreamDoneEventPending = false;
-    mQueue.cancelEvent(mCheckAudioStatusEvent->eventID());
-    mAudioStatusEventPending = false;
-    mQueue.cancelEvent(mVideoLagEvent->eventID());
-    mVideoLagEventPending = false;
-    if (updateProgressCb) {
-        mQueue.cancelEvent(mProgressCbEvent->eventID());
-        mProgressCbEventPending = false;
-    }
-}
-
-status_t PreviewPlayer::setDataSource(const char *path) {
-    Mutex::Autolock autoLock(mLock);
-    return setDataSource_l(path);
-}
-
-status_t PreviewPlayer::setDataSource_l(const char *path) {
-    reset_l();
-
-    mUri = path;
-
-    // The actual work will be done during preparation in the call to
-    // ::finishSetDataSource_l to avoid blocking the calling thread in
-    // setDataSource for any significant time.
-    return OK;
-}
-
-status_t PreviewPlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
-    bool haveAudio = false;
-    bool haveVideo = false;
-    for (size_t i = 0; i < extractor->countTracks(); ++i) {
-        sp<MetaData> meta = extractor->getTrackMetaData(i);
-
-        const char *mime;
-        CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
-            setVideoSource(extractor->getTrack(i));
-            haveVideo = true;
-        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
-            setAudioSource(extractor->getTrack(i));
-            haveAudio = true;
-
-            if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
-                // Only do this for vorbis audio, none of the other audio
-                // formats even support this ringtone specific hack and
-                // retrieving the metadata on some extractors may turn out
-                // to be very expensive.
-                sp<MetaData> fileMeta = extractor->getMetaData();
-                int32_t loop;
-                if (fileMeta != NULL
-                        && fileMeta->findInt32(kKeyAutoLoop, &loop)
-                         && loop != 0) {
-                    mFlags |= AUTO_LOOPING;
-                }
-            }
-        }
-
-        if (haveAudio && haveVideo) {
-            break;
-        }
-    }
-
-    /* Add the support for Dummy audio*/
-    if( !haveAudio ){
-        mAudioTrack = DummyAudioSource::Create(32000, 2, 20000,
-                                              ((mPlayEndTimeMsec)*1000LL));
-        if(mAudioTrack != NULL) {
-            haveAudio = true;
-        }
-    }
-
-    if (!haveAudio && !haveVideo) {
-        return UNKNOWN_ERROR;
-    }
-
-    mExtractorFlags = extractor->flags();
-    return OK;
-}
-
-status_t PreviewPlayer::setDataSource_l_jpg() {
-    ALOGV("setDataSource_l_jpg");
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    mAudioSource = DummyAudioSource::Create(32000, 2, 20000,
-                                          ((mPlayEndTimeMsec)*1000LL));
-    if(mAudioSource != NULL) {
-        setAudioSource(mAudioSource);
-    }
-    status_t error = mAudioSource->start();
-    if (error != OK) {
-        ALOGE("Error starting dummy audio source");
-        mAudioSource.clear();
-        return err;
-    }
-
-    mDurationUs = (mPlayEndTimeMsec - mPlayBeginTimeMsec)*1000LL;
-
-    mVideoSource = DummyVideoSource::Create(mVideoWidth, mVideoHeight,
-                                            mDurationUs, mUri);
-
-    updateSizeToRender(mVideoSource->getFormat());
-    setVideoSource(mVideoSource);
-    status_t err1 = mVideoSource->start();
-    if (err1 != OK) {
-        mVideoSource.clear();
-        return err;
-    }
-
-    mIsVideoSourceJpg = true;
-    return OK;
-}
-
-void PreviewPlayer::reset_l() {
-
-    if (mFlags & PREPARING) {
-        mFlags |= PREPARE_CANCELLED;
-    }
-
-    while (mFlags & PREPARING) {
-        mPreparedCondition.wait(mLock);
-    }
-
-    cancelPlayerEvents_l();
-    mAudioTrack.clear();
-    mVideoTrack.clear();
-
-    // Shutdown audio first, so that the respone to the reset request
-    // appears to happen instantaneously as far as the user is concerned
-    // If we did this later, audio would continue playing while we
-    // shutdown the video-related resources and the player appear to
-    // not be as responsive to a reset request.
-    if (mAudioPlayer == NULL && mAudioSource != NULL) {
-        // If we had an audio player, it would have effectively
-        // taken possession of the audio source and stopped it when
-        // _it_ is stopped. Otherwise this is still our responsibility.
-        mAudioSource->stop();
-    }
-    mAudioSource.clear();
-
-    mTimeSource = NULL;
-
-    //Single audio player instance used
-    //So donot delete it here
-    //It is deleted from PreviewController class
-    //delete mAudioPlayer;
-    mAudioPlayer = NULL;
-
-    if (mVideoBuffer) {
-        mVideoBuffer->release();
-        mVideoBuffer = NULL;
-    }
-
-    if (mVideoSource != NULL) {
-        mVideoSource->stop();
-
-        // The following hack is necessary to ensure that the OMX
-        // component is completely released by the time we may try
-        // to instantiate it again.
-        wp<MediaSource> tmp = mVideoSource;
-        mVideoSource.clear();
-        while (tmp.promote() != NULL) {
-            usleep(1000);
-        }
-        IPCThreadState::self()->flushCommands();
-    }
-
-    mDurationUs = -1;
-    mFlags = 0;
-    mExtractorFlags = 0;
-    mVideoWidth = mVideoHeight = -1;
-    mTimeSourceDeltaUs = 0;
-    mVideoTimeUs = 0;
-
-    mSeeking = NO_SEEK;
-    mSeekNotificationSent = false;
-    mSeekTimeUs = 0;
-
-    mUri.setTo("");
-
-    mCurrentVideoEffect = VIDEO_EFFECT_NONE;
-    mIsVideoSourceJpg = false;
-    mFrameRGBBuffer = NULL;
-    if(mFrameYUVBuffer != NULL) {
-        free(mFrameYUVBuffer);
-        mFrameYUVBuffer = NULL;
-    }
-}
-
-status_t PreviewPlayer::play() {
-    ALOGV("play");
-    Mutex::Autolock autoLock(mLock);
-
-    mFlags &= ~CACHE_UNDERRUN;
-    mFlags &= ~INFORMED_AV_EOS;
-    return play_l();
-}
-
-status_t PreviewPlayer::startAudioPlayer_l() {
-    ALOGV("startAudioPlayer_l");
-    CHECK(!(mFlags & AUDIO_RUNNING));
-
-    if (mAudioSource == NULL || mAudioPlayer == NULL) {
-        return OK;
-    }
-
-    if (!(mFlags & AUDIOPLAYER_STARTED)) {
-        mFlags |= AUDIOPLAYER_STARTED;
-
-        // We've already started the MediaSource in order to enable
-        // the prefetcher to read its data.
-        status_t err = mAudioPlayer->start(
-                true /* sourceAlreadyStarted */);
-
-        if (err != OK) {
-            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
-            return err;
-        }
-    } else {
-        mAudioPlayer->resume();
-    }
-
-    mFlags |= AUDIO_RUNNING;
-
-    mWatchForAudioEOS = true;
-
-    return OK;
-}
-
-status_t PreviewPlayer::setAudioPlayer(VideoEditorAudioPlayer *audioPlayer) {
-    ALOGV("setAudioPlayer");
-    Mutex::Autolock autoLock(mLock);
-    CHECK(!(mFlags & PLAYING));
-    mAudioPlayer = audioPlayer;
-
-    ALOGV("SetAudioPlayer");
-    mIsChangeSourceRequired = true;
-
-    // check if the new and old source are dummy
-    sp<MediaSource> anAudioSource = mAudioPlayer->getSource();
-    if (anAudioSource == NULL) {
-        // Audio player does not have any source set.
-        ALOGV("setAudioPlayer: Audio player does not have any source set");
-        return OK;
-    }
-
-    // If new video source is not dummy, then always change source
-    // Else audio player continues using old audio source and there are
-    // frame drops to maintain AV sync
-    sp<MetaData> meta;
-    if (mVideoSource != NULL) {
-        meta = mVideoSource->getFormat();
-        const char *pVidSrcType;
-        if (meta->findCString(kKeyDecoderComponent, &pVidSrcType)) {
-            if (strcmp(pVidSrcType, "DummyVideoSource") != 0) {
-                ALOGV(" Video clip with silent audio; need to change source");
-                return OK;
-            }
-        }
-    }
-
-    const char *pSrcType1;
-    const char *pSrcType2;
-    meta = anAudioSource->getFormat();
-
-    if (meta->findCString(kKeyDecoderComponent, &pSrcType1)) {
-        if (strcmp(pSrcType1, "DummyAudioSource") == 0) {
-            meta = mAudioSource->getFormat();
-            if (meta->findCString(kKeyDecoderComponent, &pSrcType2)) {
-                if (strcmp(pSrcType2, "DummyAudioSource") == 0) {
-                    mIsChangeSourceRequired = false;
-                    // Just set the new play duration for the existing source
-                    MediaSource *pMediaSrc = anAudioSource.get();
-                    DummyAudioSource *pDummyAudioSource = (DummyAudioSource*)pMediaSrc;
-                    //Increment the duration of audio source
-                    pDummyAudioSource->setDuration(
-                        (int64_t)((mPlayEndTimeMsec)*1000LL));
-
-                    // Stop the new audio source
-                    // since we continue using old source
-                    ALOGV("setAudioPlayer: stop new audio source");
-                    mAudioSource->stop();
-                }
-            }
-        }
-    }
-
-    return OK;
-}
-
-void PreviewPlayer::onStreamDone() {
-    ALOGV("onStreamDone");
-    // Posted whenever any stream finishes playing.
-
-    Mutex::Autolock autoLock(mLock);
-    if (!mStreamDoneEventPending) {
-        return;
-    }
-    mStreamDoneEventPending = false;
-
-    if (mStreamDoneStatus != ERROR_END_OF_STREAM) {
-        ALOGV("MEDIA_ERROR %d", mStreamDoneStatus);
-
-        notifyListener_l(
-                MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, mStreamDoneStatus);
-
-        pause_l(true /* at eos */);
-
-        mFlags |= AT_EOS;
-        return;
-    }
-
-    const bool allDone =
-        (mVideoSource == NULL || (mFlags & VIDEO_AT_EOS))
-            && (mAudioSource == NULL || (mFlags & AUDIO_AT_EOS));
-
-    if (!allDone) {
-        return;
-    }
-
-    if (mFlags & (LOOPING | AUTO_LOOPING)) {
-        seekTo_l(0);
-
-        if (mVideoSource != NULL) {
-            postVideoEvent_l();
-        }
-    } else {
-        ALOGV("MEDIA_PLAYBACK_COMPLETE");
-        //pause before sending event
-        pause_l(true /* at eos */);
-
-        //This lock is used to syncronize onStreamDone() in PreviewPlayer and
-        //stopPreview() in PreviewController
-        Mutex::Autolock autoLock(mLockControl);
-        /* Make sure PreviewPlayer only notifies MEDIA_PLAYBACK_COMPLETE once for each clip!
-         * It happens twice in following scenario.
-         * To make the clips in preview storyboard are played and switched smoothly,
-         * PreviewController uses two PreviewPlayer instances and one AudioPlayer.
-         * The two PreviewPlayer use the same AudioPlayer to play the audio,
-         * and change the audio source of the AudioPlayer.
-         * If the audio source of current playing clip and next clip are dummy
-         * audio source(image or video without audio), it will not change the audio source
-         * to avoid the "audio glitch", and keep using the current audio source.
-         * When the video of current clip reached the EOS, PreviewPlayer will set EOS flag
-         * for video and audio, and it will notify MEDIA_PLAYBACK_COMPLETE.
-         * But the audio(dummy audio source) is still playing(for next clip),
-         * and when it reached the EOS, and video reached EOS,
-         * PreviewPlayer will notify MEDIA_PLAYBACK_COMPLETE again. */
-        if (!(mFlags & INFORMED_AV_EOS)) {
-            notifyListener_l(MEDIA_PLAYBACK_COMPLETE);
-            mFlags |= INFORMED_AV_EOS;
-        }
-        mFlags |= AT_EOS;
-        ALOGV("onStreamDone end");
-        return;
-    }
-}
-
-
-status_t PreviewPlayer::play_l() {
-    ALOGV("play_l");
-
-    mFlags &= ~SEEK_PREVIEW;
-
-    if (mFlags & PLAYING) {
-        return OK;
-    }
-    mStartNextPlayer = false;
-
-    if (!(mFlags & PREPARED)) {
-        status_t err = prepare_l();
-
-        if (err != OK) {
-            return err;
-        }
-    }
-
-    mFlags |= PLAYING;
-    mFlags |= FIRST_FRAME;
-
-    bool deferredAudioSeek = false;
-
-    if (mAudioSource != NULL) {
-        if (mAudioPlayer == NULL) {
-            if (mAudioSink != NULL) {
-
-                mAudioPlayer = new VideoEditorAudioPlayer(mAudioSink, this);
-                mAudioPlayer->setSource(mAudioSource);
-
-                mAudioPlayer->setAudioMixSettings(
-                 mPreviewPlayerAudioMixSettings);
-
-                mAudioPlayer->setAudioMixPCMFileHandle(
-                 mAudioMixPCMFileHandle);
-
-                mAudioPlayer->setAudioMixStoryBoardSkimTimeStamp(
-                 mAudioMixStoryBoardTS, mCurrentMediaBeginCutTime,
-                 mCurrentMediaVolumeValue);
-
-                 mFlags |= AUDIOPLAYER_STARTED;
-                // We've already started the MediaSource in order to enable
-                // the prefetcher to read its data.
-                status_t err = mAudioPlayer->start(
-                        true /* sourceAlreadyStarted */);
-
-                if (err != OK) {
-                    //delete mAudioPlayer;
-                    mAudioPlayer = NULL;
-
-                    mFlags &= ~(PLAYING | FIRST_FRAME);
-                    return err;
-                }
-
-                mTimeSource = mAudioPlayer;
-                mFlags |= AUDIO_RUNNING;
-                deferredAudioSeek = true;
-                mWatchForAudioSeekComplete = false;
-                mWatchForAudioEOS = true;
-            }
-        } else {
-            bool isAudioPlayerStarted = mAudioPlayer->isStarted();
-
-            if (mIsChangeSourceRequired == true) {
-                ALOGV("play_l: Change audio source required");
-
-                if (isAudioPlayerStarted == true) {
-                    mAudioPlayer->pause();
-                }
-
-                mAudioPlayer->setSource(mAudioSource);
-                mAudioPlayer->setObserver(this);
-
-                mAudioPlayer->setAudioMixSettings(
-                 mPreviewPlayerAudioMixSettings);
-
-                mAudioPlayer->setAudioMixStoryBoardSkimTimeStamp(
-                    mAudioMixStoryBoardTS, mCurrentMediaBeginCutTime,
-                    mCurrentMediaVolumeValue);
-
-                if (isAudioPlayerStarted == true) {
-                    mAudioPlayer->resume();
-                } else {
-                    status_t err = OK;
-                    err = mAudioPlayer->start(true);
-                    if (err != OK) {
-                        mAudioPlayer = NULL;
-                        mAudioPlayer = NULL;
-
-                        mFlags &= ~(PLAYING | FIRST_FRAME);
-                        return err;
-                    }
-                }
-            } else {
-                ALOGV("play_l: No Source change required");
-                mAudioPlayer->setAudioMixStoryBoardSkimTimeStamp(
-                    mAudioMixStoryBoardTS, mCurrentMediaBeginCutTime,
-                    mCurrentMediaVolumeValue);
-
-                mAudioPlayer->resume();
-            }
-
-            mFlags |= AUDIOPLAYER_STARTED;
-            mFlags |= AUDIO_RUNNING;
-            mTimeSource = mAudioPlayer;
-            deferredAudioSeek = true;
-            mWatchForAudioSeekComplete = false;
-            mWatchForAudioEOS = true;
-        }
-    }
-
-    if (mTimeSource == NULL && mAudioPlayer == NULL) {
-        mTimeSource = &mSystemTimeSource;
-    }
-
-    // Set the seek option for Image source files and read.
-    // This resets the timestamping for image play
-    if (mIsVideoSourceJpg) {
-        MediaSource::ReadOptions options;
-        MediaBuffer *aLocalBuffer;
-        options.setSeekTo(mSeekTimeUs);
-        mVideoSource->read(&aLocalBuffer, &options);
-        aLocalBuffer->release();
-    }
-
-    if (mVideoSource != NULL) {
-        // Kick off video playback
-        postVideoEvent_l();
-    }
-
-    if (deferredAudioSeek) {
-        // If there was a seek request while we were paused
-        // and we're just starting up again, honor the request now.
-        seekAudioIfNecessary_l();
-    }
-
-    if (mFlags & AT_EOS) {
-        // Legacy behaviour, if a stream finishes playing and then
-        // is started again, we play from the start...
-        seekTo_l(0);
-    }
-
-    return OK;
-}
-
-
-status_t PreviewPlayer::initRenderer_l() {
-    if (mSurface != NULL) {
-        if(mVideoRenderer == NULL) {
-            mVideoRenderer = mNativeWindowRenderer->createRenderInput();
-            if (mVideoSource != NULL) {
-                updateSizeToRender(mVideoSource->getFormat());
-            }
-        }
-    }
-    return OK;
-}
-
-
-status_t PreviewPlayer::seekTo(int64_t timeUs) {
-    Mutex::Autolock autoLock(mLock);
-    if ((mExtractorFlags & MediaExtractor::CAN_SEEK) || (mIsVideoSourceJpg)) {
-        return seekTo_l(timeUs);
-    }
-
-    return OK;
-}
-
-
-status_t PreviewPlayer::getVideoDimensions(
-        int32_t *width, int32_t *height) const {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mVideoWidth < 0 || mVideoHeight < 0) {
-        return UNKNOWN_ERROR;
-    }
-
-    *width = mVideoWidth;
-    *height = mVideoHeight;
-
-    return OK;
-}
-
-
-status_t PreviewPlayer::initAudioDecoder_l() {
-    sp<MetaData> meta = mAudioTrack->getFormat();
-    const char *mime;
-    CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
-        mAudioSource = mAudioTrack;
-    } else {
-        sp<MediaSource> aRawSource;
-        aRawSource = OMXCodec::Create(
-                mClient.interface(), mAudioTrack->getFormat(),
-                false, // createEncoder
-                mAudioTrack);
-
-        if(aRawSource != NULL) {
-            mAudioSource = new VideoEditorSRC(aRawSource);
-        }
-    }
-
-    if (mAudioSource != NULL) {
-        int64_t durationUs;
-        if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
-            setDuration_l(durationUs);
-        }
-        status_t err = mAudioSource->start();
-
-        if (err != OK) {
-            mAudioSource.clear();
-            return err;
-        }
-    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
-        // For legacy reasons we're simply going to ignore the absence
-        // of an audio decoder for QCELP instead of aborting playback
-        // altogether.
-        return OK;
-    }
-
-    return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
-}
-
-status_t PreviewPlayer::initVideoDecoder_l(uint32_t flags) {
-    initRenderer_l();
-
-    if (mVideoRenderer == NULL) {
-        ALOGE("Cannot create renderer");
-        return UNKNOWN_ERROR;
-    }
-
-    mVideoSource = OMXCodec::Create(
-            mClient.interface(), mVideoTrack->getFormat(),
-            false,
-            mVideoTrack,
-            NULL, flags, mVideoRenderer->getTargetWindow());
-
-    if (mVideoSource != NULL) {
-        int64_t durationUs;
-        if (mVideoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
-            setDuration_l(durationUs);
-        }
-
-        updateSizeToRender(mVideoTrack->getFormat());
-
-        status_t err = mVideoSource->start();
-
-        if (err != OK) {
-            mVideoSource.clear();
-            return err;
-        }
-    }
-
-    return mVideoSource != NULL ? OK : UNKNOWN_ERROR;
-}
-
-
-void PreviewPlayer::onVideoEvent() {
-    uint32_t i=0;
-    M4OSA_ERR err1 = M4NO_ERROR;
-    int64_t imageFrameTimeUs = 0;
-
-    Mutex::Autolock autoLock(mLock);
-    if (!mVideoEventPending) {
-        // The event has been cancelled in reset_l() but had already
-        // been scheduled for execution at that time.
-        return;
-    }
-    mVideoEventPending = false;
-
-    if (mFlags & SEEK_PREVIEW) {
-        mFlags &= ~SEEK_PREVIEW;
-        return;
-    }
-
-    TimeSource *ts_st =  &mSystemTimeSource;
-    int64_t timeStartUs = ts_st->getRealTimeUs();
-
-    if (mSeeking != NO_SEEK) {
-
-        if(mAudioSource != NULL) {
-
-            // We're going to seek the video source first, followed by
-            // the audio source.
-            // In order to avoid jumps in the DataSource offset caused by
-            // the audio codec prefetching data from the old locations
-            // while the video codec is already reading data from the new
-            // locations, we'll "pause" the audio source, causing it to
-            // stop reading input data until a subsequent seek.
-
-            if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-                mAudioPlayer->pause();
-                mFlags &= ~AUDIO_RUNNING;
-            }
-            mAudioSource->pause();
-        }
-    }
-
-    if (!mVideoBuffer) {
-        MediaSource::ReadOptions options;
-        if (mSeeking != NO_SEEK) {
-            ALOGV("LV PLAYER seeking to %lld us (%.2f secs)", mSeekTimeUs,
-                                                      mSeekTimeUs / 1E6);
-
-            options.setSeekTo(
-                    mSeekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST);
-        }
-        for (;;) {
-            status_t err = mVideoSource->read(&mVideoBuffer, &options);
-            options.clearSeekTo();
-
-            if (err != OK) {
-                CHECK(!mVideoBuffer);
-
-                if (err == INFO_FORMAT_CHANGED) {
-                    ALOGV("LV PLAYER VideoSource signalled format change");
-                    notifyVideoSize_l();
-
-                    if (mVideoRenderer != NULL) {
-                        mVideoRendererIsPreview = false;
-                        err = initRenderer_l();
-                        if (err != OK) {
-                            postStreamDoneEvent_l(err);
-                        }
-
-                    }
-
-                    updateSizeToRender(mVideoSource->getFormat());
-                    continue;
-                }
-                // So video playback is complete, but we may still have
-                // a seek request pending that needs to be applied to the audio track
-                if (mSeeking != NO_SEEK) {
-                    ALOGV("video stream ended while seeking!");
-                }
-                finishSeekIfNecessary(-1);
-                ALOGV("PreviewPlayer: onVideoEvent EOS reached.");
-                mFlags |= VIDEO_AT_EOS;
-                mFlags |= AUDIO_AT_EOS;
-                mOverlayUpdateEventPosted = false;
-                postStreamDoneEvent_l(err);
-                // Set the last decoded timestamp to duration
-                mDecodedVideoTs = (mPlayEndTimeMsec*1000LL);
-                return;
-            }
-
-            if (mVideoBuffer->range_length() == 0) {
-                // Some decoders, notably the PV AVC software decoder
-                // return spurious empty buffers that we just want to ignore.
-
-                mVideoBuffer->release();
-                mVideoBuffer = NULL;
-                continue;
-            }
-
-            int64_t videoTimeUs;
-            CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &videoTimeUs));
-
-            if (mSeeking != NO_SEEK) {
-                if (videoTimeUs < mSeekTimeUs) {
-                    // buffers are before seek time
-                    // ignore them
-                    mVideoBuffer->release();
-                    mVideoBuffer = NULL;
-                    continue;
-                }
-            } else {
-                if((videoTimeUs/1000) < mPlayBeginTimeMsec) {
-                    // Frames are before begin cut time
-                    // Donot render
-                    mVideoBuffer->release();
-                    mVideoBuffer = NULL;
-                    continue;
-                }
-            }
-            break;
-        }
-    }
-
-    mNumberDecVideoFrames++;
-
-    int64_t timeUs;
-    CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-    setPosition_l(timeUs);
-
-    if (!mStartNextPlayer) {
-        int64_t playbackTimeRemaining = (mPlayEndTimeMsec * 1000LL) - timeUs;
-        if (playbackTimeRemaining <= 1500000) {
-            //When less than 1.5 sec of playback left
-            // send notification to start next player
-
-            mStartNextPlayer = true;
-            notifyListener_l(0xAAAAAAAA);
-        }
-    }
-
-    SeekType wasSeeking = mSeeking;
-    finishSeekIfNecessary(timeUs);
-    if (mAudioPlayer != NULL && !(mFlags & (AUDIO_RUNNING))) {
-        status_t err = startAudioPlayer_l();
-        if (err != OK) {
-            ALOGE("Starting the audio player failed w/ err %d", err);
-            return;
-        }
-    }
-
-    TimeSource *ts = (mFlags & AUDIO_AT_EOS) ? &mSystemTimeSource : mTimeSource;
-
-    if(ts == NULL) {
-        mVideoBuffer->release();
-        mVideoBuffer = NULL;
-        return;
-    }
-
-    if(!mIsVideoSourceJpg) {
-        if (mFlags & FIRST_FRAME) {
-            mFlags &= ~FIRST_FRAME;
-
-            mTimeSourceDeltaUs = ts->getRealTimeUs() - timeUs;
-        }
-
-        int64_t realTimeUs, mediaTimeUs;
-        if (!(mFlags & AUDIO_AT_EOS) && mAudioPlayer != NULL
-            && mAudioPlayer->getMediaTimeMapping(&realTimeUs, &mediaTimeUs)) {
-            mTimeSourceDeltaUs = realTimeUs - mediaTimeUs;
-        }
-
-        int64_t nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
-
-        int64_t latenessUs = nowUs - timeUs;
-
-        if (wasSeeking != NO_SEEK) {
-            // Let's display the first frame after seeking right away.
-            latenessUs = 0;
-        }
-        ALOGV("Audio time stamp = %lld and video time stamp = %lld",
-                                            ts->getRealTimeUs(),timeUs);
-        if (latenessUs > 40000) {
-            // We're more than 40ms late.
-
-            ALOGV("LV PLAYER we're late by %lld us (%.2f secs)",
-                                           latenessUs, latenessUs / 1E6);
-
-            mVideoBuffer->release();
-            mVideoBuffer = NULL;
-            postVideoEvent_l(0);
-            return;
-        }
-
-        if (latenessUs < -25000) {
-            // We're more than 25ms early.
-            ALOGV("We're more than 25ms early, lateness %lld", latenessUs);
-
-            postVideoEvent_l(25000);
-            return;
-        }
-    }
-
-    if (mVideoRendererIsPreview || mVideoRenderer == NULL) {
-        mVideoRendererIsPreview = false;
-
-        status_t err = initRenderer_l();
-        if (err != OK) {
-            postStreamDoneEvent_l(err);
-        }
-    }
-
-    // If timestamp exceeds endCutTime of clip, donot render
-    if((timeUs/1000) > mPlayEndTimeMsec) {
-        mVideoBuffer->release();
-        mVideoBuffer = NULL;
-        mFlags |= VIDEO_AT_EOS;
-        mFlags |= AUDIO_AT_EOS;
-        ALOGV("PreviewPlayer: onVideoEvent timeUs > mPlayEndTime; send EOS..");
-        mOverlayUpdateEventPosted = false;
-        // Set the last decoded timestamp to duration
-        mDecodedVideoTs = (mPlayEndTimeMsec*1000LL);
-        postStreamDoneEvent_l(ERROR_END_OF_STREAM);
-        return;
-    }
-    // Capture the frame timestamp to be rendered
-    mDecodedVideoTs = timeUs;
-
-    // Post processing to apply video effects
-    for(i=0;i<mNumberEffects;i++) {
-        // First check if effect starttime matches the clip being previewed
-        if((mEffectsSettings[i].uiStartTime < (mDecVideoTsStoryBoard/1000)) ||
-        (mEffectsSettings[i].uiStartTime >=
-         ((mDecVideoTsStoryBoard/1000) + mPlayEndTimeMsec - mPlayBeginTimeMsec)))
-        {
-            // This effect doesn't belong to this clip, check next one
-            continue;
-        }
-        // Check if effect applies to this particular frame timestamp
-        if((mEffectsSettings[i].uiStartTime <=
-         (((timeUs+mDecVideoTsStoryBoard)/1000)-mPlayBeginTimeMsec)) &&
-            ((mEffectsSettings[i].uiStartTime+mEffectsSettings[i].uiDuration) >=
-             (((timeUs+mDecVideoTsStoryBoard)/1000)-mPlayBeginTimeMsec))
-              && (mEffectsSettings[i].uiDuration != 0)) {
-            setVideoPostProcessingNode(
-             mEffectsSettings[i].VideoEffectType, TRUE);
-        }
-        else {
-            setVideoPostProcessingNode(
-             mEffectsSettings[i].VideoEffectType, FALSE);
-        }
-    }
-
-    //Provide the overlay Update indication when there is an overlay effect
-    if (mCurrentVideoEffect & VIDEO_EFFECT_FRAMING) {
-        mCurrentVideoEffect &= ~VIDEO_EFFECT_FRAMING; //never apply framing here.
-        if (!mOverlayUpdateEventPosted) {
-            // Find the effect in effectSettings array
-            M4OSA_UInt32 index;
-            for (index = 0; index < mNumberEffects; index++) {
-                M4OSA_UInt32 timeMs = mDecodedVideoTs/1000;
-                M4OSA_UInt32 timeOffset = mDecVideoTsStoryBoard/1000;
-                if(mEffectsSettings[index].VideoEffectType ==
-                    (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
-                    if (((mEffectsSettings[index].uiStartTime + 1) <=
-                        timeMs + timeOffset - mPlayBeginTimeMsec) &&
-                        ((mEffectsSettings[index].uiStartTime - 1 +
-                        mEffectsSettings[index].uiDuration) >=
-                        timeMs + timeOffset - mPlayBeginTimeMsec))
-                    {
-                        break;
-                    }
-                }
-            }
-            if (index < mNumberEffects) {
-                mCurrFramingEffectIndex = index;
-                mOverlayUpdateEventPosted = true;
-                postOverlayUpdateEvent_l();
-                ALOGV("Framing index = %ld", mCurrFramingEffectIndex);
-            } else {
-                ALOGV("No framing effects found");
-            }
-        }
-
-    } else if (mOverlayUpdateEventPosted) {
-        //Post the event when the overlay is no more valid
-        ALOGV("Overlay is Done");
-        mOverlayUpdateEventPosted = false;
-        postOverlayUpdateEvent_l();
-    }
-
-    if (mVideoRenderer != NULL) {
-        mVideoRenderer->render(mVideoBuffer, mCurrentVideoEffect,
-                mRenderingMode, mIsVideoSourceJpg);
-    }
-
-    mVideoBuffer->release();
-    mVideoBuffer = NULL;
-
-    // Post progress callback based on callback interval set
-    if(mNumberDecVideoFrames >= mProgressCbInterval) {
-        postProgressCallbackEvent_l();
-        mNumberDecVideoFrames = 0;  // reset counter
-    }
-
-    // if reached EndCutTime of clip, post EOS event
-    if((timeUs/1000) >= mPlayEndTimeMsec) {
-        ALOGV("PreviewPlayer: onVideoEvent EOS.");
-        mFlags |= VIDEO_AT_EOS;
-        mFlags |= AUDIO_AT_EOS;
-        mOverlayUpdateEventPosted = false;
-        // Set the last decoded timestamp to duration
-        mDecodedVideoTs = (mPlayEndTimeMsec*1000LL);
-        postStreamDoneEvent_l(ERROR_END_OF_STREAM);
-    }
-    else {
-        if ((wasSeeking != NO_SEEK) && (mFlags & SEEK_PREVIEW)) {
-            mFlags &= ~SEEK_PREVIEW;
-            return;
-        }
-
-        if(!mIsVideoSourceJpg) {
-            postVideoEvent_l(0);
-        }
-        else {
-            postVideoEvent_l(33000);
-        }
-    }
-}
-
-status_t PreviewPlayer::prepare() {
-    ALOGV("prepare");
-    Mutex::Autolock autoLock(mLock);
-    return prepare_l();
-}
-
-status_t PreviewPlayer::prepare_l() {
-    ALOGV("prepare_l");
-    if (mFlags & PREPARED) {
-        return OK;
-    }
-
-    if (mFlags & PREPARING) {
-        return UNKNOWN_ERROR;
-    }
-
-    mIsAsyncPrepare = false;
-    status_t err = prepareAsync_l();
-
-    if (err != OK) {
-        return err;
-    }
-
-    while (mFlags & PREPARING) {
-        mPreparedCondition.wait(mLock);
-    }
-
-    return mPrepareResult;
-}
-
-status_t PreviewPlayer::prepareAsync() {
-    ALOGV("prepareAsync");
-    Mutex::Autolock autoLock(mLock);
-    return prepareAsync_l();
-}
-
-status_t PreviewPlayer::prepareAsync_l() {
-    ALOGV("prepareAsync_l");
-    if (mFlags & PREPARING) {
-        return UNKNOWN_ERROR;  // async prepare already pending
-    }
-
-    if (!mQueueStarted) {
-        mQueue.start();
-        mQueueStarted = true;
-    }
-
-    mFlags |= PREPARING;
-    mAsyncPrepareEvent = new PreviewPlayerEvent(
-            this, &PreviewPlayer::onPrepareAsyncEvent);
-
-    mQueue.postEvent(mAsyncPrepareEvent);
-
-    return OK;
-}
-
-status_t PreviewPlayer::finishSetDataSource_l() {
-    sp<DataSource> dataSource;
-    sp<MediaExtractor> extractor;
-
-    dataSource = DataSource::CreateFromURI(mUri.string(), NULL);
-
-    if (dataSource == NULL) {
-        return UNKNOWN_ERROR;
-    }
-
-    //If file type is .rgb, then no need to check for Extractor
-    int uriLen = strlen(mUri);
-    int startOffset = uriLen - 4;
-    if(!strncasecmp(mUri+startOffset, ".rgb", 4)) {
-        extractor = NULL;
-    }
-    else {
-        extractor = MediaExtractor::Create(dataSource,
-                                        MEDIA_MIMETYPE_CONTAINER_MPEG4);
-    }
-
-    if (extractor == NULL) {
-        ALOGV("finishSetDataSource_l: failed to create extractor");
-        return setDataSource_l_jpg();
-    }
-
-    return setDataSource_l(extractor);
-}
-
-void PreviewPlayer::onPrepareAsyncEvent() {
-    Mutex::Autolock autoLock(mLock);
-    ALOGV("onPrepareAsyncEvent");
-
-    if (mFlags & PREPARE_CANCELLED) {
-        ALOGV("prepare was cancelled before doing anything");
-        abortPrepare(UNKNOWN_ERROR);
-        return;
-    }
-
-    if (mUri.size() > 0) {
-        status_t err = finishSetDataSource_l();
-
-        if (err != OK) {
-            abortPrepare(err);
-            return;
-        }
-    }
-
-    if (mVideoTrack != NULL && mVideoSource == NULL) {
-        status_t err = initVideoDecoder_l(OMXCodec::kHardwareCodecsOnly);
-
-        if (err != OK) {
-            abortPrepare(err);
-            return;
-        }
-    }
-
-    if (mAudioTrack != NULL && mAudioSource == NULL) {
-        status_t err = initAudioDecoder_l();
-
-        if (err != OK) {
-            abortPrepare(err);
-            return;
-        }
-    }
-    finishAsyncPrepare_l();
-
-}
-
-void PreviewPlayer::finishAsyncPrepare_l() {
-    ALOGV("finishAsyncPrepare_l");
-    if (mIsAsyncPrepare) {
-        if (mVideoSource == NULL) {
-            notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
-        } else {
-            notifyVideoSize_l();
-        }
-        notifyListener_l(MEDIA_PREPARED);
-    }
-
-    mPrepareResult = OK;
-    mFlags &= ~(PREPARING|PREPARE_CANCELLED);
-    mFlags |= PREPARED;
-    mAsyncPrepareEvent = NULL;
-    mPreparedCondition.broadcast();
-}
-
-void PreviewPlayer::acquireLock() {
-    ALOGV("acquireLock");
-    mLockControl.lock();
-}
-
-void PreviewPlayer::releaseLock() {
-    ALOGV("releaseLock");
-    mLockControl.unlock();
-}
-
-status_t PreviewPlayer::loadEffectsSettings(
-        M4VSS3GPP_EffectSettings* pEffectSettings, int nEffects) {
-
-    ALOGV("loadEffectsSettings");
-    mNumberEffects = nEffects;
-    mEffectsSettings = pEffectSettings;
-    return OK;
-}
-
-status_t PreviewPlayer::loadAudioMixSettings(
-        M4xVSS_AudioMixingSettings* pAudioMixSettings) {
-
-    ALOGV("loadAudioMixSettings");
-    mPreviewPlayerAudioMixSettings = pAudioMixSettings;
-    return OK;
-}
-
-status_t PreviewPlayer::setAudioMixPCMFileHandle(
-        M4OSA_Context pAudioMixPCMFileHandle) {
-
-    ALOGV("setAudioMixPCMFileHandle");
-    mAudioMixPCMFileHandle = pAudioMixPCMFileHandle;
-    return OK;
-}
-
-status_t PreviewPlayer::setAudioMixStoryBoardParam(
-        M4OSA_UInt32 audioMixStoryBoardTS,
-        M4OSA_UInt32 currentMediaBeginCutTime,
-        M4OSA_UInt32 primaryTrackVolValue ) {
-
-    ALOGV("setAudioMixStoryBoardParam");
-    mAudioMixStoryBoardTS = audioMixStoryBoardTS;
-    mCurrentMediaBeginCutTime = currentMediaBeginCutTime;
-    mCurrentMediaVolumeValue = primaryTrackVolValue;
-    return OK;
-}
-
-status_t PreviewPlayer::setPlaybackBeginTime(uint32_t msec) {
-
-    mPlayBeginTimeMsec = msec;
-    return OK;
-}
-
-status_t PreviewPlayer::setPlaybackEndTime(uint32_t msec) {
-
-    mPlayEndTimeMsec = msec;
-    return OK;
-}
-
-status_t PreviewPlayer::setStoryboardStartTime(uint32_t msec) {
-
-    mStoryboardStartTimeMsec = msec;
-    mDecVideoTsStoryBoard = mStoryboardStartTimeMsec * 1000LL;
-    return OK;
-}
-
-status_t PreviewPlayer::setProgressCallbackInterval(uint32_t cbInterval) {
-
-    mProgressCbInterval = cbInterval;
-    return OK;
-}
-
-
-status_t PreviewPlayer::setMediaRenderingMode(
-        M4xVSS_MediaRendering mode,
-        M4VIDEOEDITING_VideoFrameSize outputVideoSize) {
-
-    mRenderingMode = mode;
-
-    /* get the video width and height by resolution */
-    return getVideoSizeByResolution(
-                outputVideoSize,
-                    &mOutputVideoWidth, &mOutputVideoHeight);
-
-}
-
-status_t PreviewPlayer::resetJniCallbackTimeStamp() {
-
-    mDecVideoTsStoryBoard = mStoryboardStartTimeMsec * 1000LL;
-    return OK;
-}
-
-void PreviewPlayer::postProgressCallbackEvent_l() {
-    if (mProgressCbEventPending) {
-        return;
-    }
-    mProgressCbEventPending = true;
-
-    mQueue.postEvent(mProgressCbEvent);
-}
-
-
-void PreviewPlayer::onProgressCbEvent() {
-    Mutex::Autolock autoLock(mLock);
-    if (!mProgressCbEventPending) {
-        return;
-    }
-    mProgressCbEventPending = false;
-    // If playback starts from previous I-frame,
-    // then send frame storyboard duration
-    if ((mDecodedVideoTs/1000) < mPlayBeginTimeMsec) {
-        notifyListener_l(MEDIA_INFO, 0, mDecVideoTsStoryBoard/1000);
-    } else {
-        notifyListener_l(MEDIA_INFO, 0,
-        (((mDecodedVideoTs+mDecVideoTsStoryBoard)/1000)-mPlayBeginTimeMsec));
-    }
-}
-
-void PreviewPlayer::postOverlayUpdateEvent_l() {
-    if (mOverlayUpdateEventPending) {
-        return;
-    }
-    mOverlayUpdateEventPending = true;
-    mQueue.postEvent(mOverlayUpdateEvent);
-}
-
-void PreviewPlayer::onUpdateOverlayEvent() {
-    Mutex::Autolock autoLock(mLock);
-
-    if (!mOverlayUpdateEventPending) {
-        return;
-    }
-    mOverlayUpdateEventPending = false;
-
-    int updateState = mOverlayUpdateEventPosted? 1: 0;
-    notifyListener_l(0xBBBBBBBB, updateState, mCurrFramingEffectIndex);
-}
-
-
-void PreviewPlayer::setVideoPostProcessingNode(
-        M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable) {
-
-    uint32_t effect = VIDEO_EFFECT_NONE;
-
-    //Map M4VSS3GPP_VideoEffectType to local enum
-    switch(type) {
-        case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
-            effect = VIDEO_EFFECT_FADEFROMBLACK;
-            break;
-
-        case M4VSS3GPP_kVideoEffectType_FadeToBlack:
-            effect = VIDEO_EFFECT_FADETOBLACK;
-            break;
-
-        case M4xVSS_kVideoEffectType_BlackAndWhite:
-            effect = VIDEO_EFFECT_BLACKANDWHITE;
-            break;
-
-        case M4xVSS_kVideoEffectType_Pink:
-            effect = VIDEO_EFFECT_PINK;
-            break;
-
-        case M4xVSS_kVideoEffectType_Green:
-            effect = VIDEO_EFFECT_GREEN;
-            break;
-
-        case M4xVSS_kVideoEffectType_Sepia:
-            effect = VIDEO_EFFECT_SEPIA;
-            break;
-
-        case M4xVSS_kVideoEffectType_Negative:
-            effect = VIDEO_EFFECT_NEGATIVE;
-            break;
-
-        case M4xVSS_kVideoEffectType_Framing:
-            effect = VIDEO_EFFECT_FRAMING;
-            break;
-
-        case M4xVSS_kVideoEffectType_Fifties:
-            effect = VIDEO_EFFECT_FIFTIES;
-            break;
-
-        case M4xVSS_kVideoEffectType_ColorRGB16:
-            effect = VIDEO_EFFECT_COLOR_RGB16;
-            break;
-
-        case M4xVSS_kVideoEffectType_Gradient:
-            effect = VIDEO_EFFECT_GRADIENT;
-            break;
-
-        default:
-            effect = VIDEO_EFFECT_NONE;
-            break;
-    }
-
-    if (enable == M4OSA_TRUE) {
-        //If already set, then no need to set again
-        if (!(mCurrentVideoEffect & effect)) {
-            mCurrentVideoEffect |= effect;
-            if (effect == VIDEO_EFFECT_FIFTIES) {
-                mIsFiftiesEffectStarted = true;
-            }
-        }
-    } else  {
-        //Reset only if already set
-        if (mCurrentVideoEffect & effect) {
-            mCurrentVideoEffect &= ~effect;
-        }
-    }
-}
-
-status_t PreviewPlayer::setImageClipProperties(uint32_t width,uint32_t height) {
-    mVideoWidth = width;
-    mVideoHeight = height;
-    return OK;
-}
-
-status_t PreviewPlayer::readFirstVideoFrame() {
-    ALOGV("readFirstVideoFrame");
-
-    if (!mVideoBuffer) {
-        MediaSource::ReadOptions options;
-        if (mSeeking != NO_SEEK) {
-            ALOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs,
-                    mSeekTimeUs / 1E6);
-
-            options.setSeekTo(
-                    mSeekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST);
-        }
-        for (;;) {
-            status_t err = mVideoSource->read(&mVideoBuffer, &options);
-            options.clearSeekTo();
-
-            if (err != OK) {
-                CHECK(!mVideoBuffer);
-
-                if (err == INFO_FORMAT_CHANGED) {
-                    ALOGV("VideoSource signalled format change");
-                    notifyVideoSize_l();
-
-                    if (mVideoRenderer != NULL) {
-                        mVideoRendererIsPreview = false;
-                        err = initRenderer_l();
-                        if (err != OK) {
-                            postStreamDoneEvent_l(err);
-                        }
-                    }
-
-                    updateSizeToRender(mVideoSource->getFormat());
-                    continue;
-                }
-                ALOGV("EOS reached.");
-                mFlags |= VIDEO_AT_EOS;
-                mFlags |= AUDIO_AT_EOS;
-                postStreamDoneEvent_l(err);
-                return OK;
-            }
-
-            if (mVideoBuffer->range_length() == 0) {
-                // Some decoders, notably the PV AVC software decoder
-                // return spurious empty buffers that we just want to ignore.
-
-                mVideoBuffer->release();
-                mVideoBuffer = NULL;
-                continue;
-            }
-
-            int64_t videoTimeUs;
-            CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &videoTimeUs));
-            if (mSeeking != NO_SEEK) {
-                if (videoTimeUs < mSeekTimeUs) {
-                    // buffers are before seek time
-                    // ignore them
-                    mVideoBuffer->release();
-                    mVideoBuffer = NULL;
-                    continue;
-                }
-            } else {
-                if ((videoTimeUs/1000) < mPlayBeginTimeMsec) {
-                    // buffers are before begin cut time
-                    // ignore them
-                    mVideoBuffer->release();
-                    mVideoBuffer = NULL;
-                    continue;
-                }
-            }
-            break;
-        }
-    }
-
-    int64_t timeUs;
-    CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-    setPosition_l(timeUs);
-
-    mDecodedVideoTs = timeUs;
-
-    return OK;
-
-}
-
-status_t PreviewPlayer::getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs) {
-    *lastRenderedTimeMs = (((mDecodedVideoTs+mDecVideoTsStoryBoard)/1000)-mPlayBeginTimeMsec);
-    return OK;
-}
-
-void PreviewPlayer::updateSizeToRender(sp<MetaData> meta) {
-    if (mVideoRenderer) {
-        mVideoRenderer->updateVideoSize(meta);
-    }
-}
-
-void PreviewPlayer::setListener(const wp<MediaPlayerBase> &listener) {
-    Mutex::Autolock autoLock(mLock);
-    mListener = listener;
-}
-
-status_t PreviewPlayer::setDataSource(const sp<IStreamSource> &source) {
-    return INVALID_OPERATION;
-}
-
-void PreviewPlayer::reset() {
-    Mutex::Autolock autoLock(mLock);
-    reset_l();
-}
-
-void PreviewPlayer::clear_l() {
-    mDisplayWidth = 0;
-    mDisplayHeight = 0;
-
-    if (mFlags & PLAYING) {
-        updateBatteryUsage_l();
-    }
-
-    if (mFlags & PREPARING) {
-        mFlags |= PREPARE_CANCELLED;
-
-        if (mFlags & PREPARING_CONNECTED) {
-            // We are basically done preparing, we're just buffering
-            // enough data to start playback, we can safely interrupt that.
-            finishAsyncPrepare_l();
-        }
-    }
-
-    while (mFlags & PREPARING) {
-        mPreparedCondition.wait(mLock);
-    }
-
-    cancelPlayerEvents_l(true);
-
-    mAudioTrack.clear();
-    mVideoTrack.clear();
-
-    // Shutdown audio first, so that the respone to the reset request
-    // appears to happen instantaneously as far as the user is concerned
-    // If we did this later, audio would continue playing while we
-    // shutdown the video-related resources and the player appear to
-    // not be as responsive to a reset request.
-    if (mAudioPlayer == NULL && mAudioSource != NULL) {
-        // If we had an audio player, it would have effectively
-        // taken possession of the audio source and stopped it when
-        // _it_ is stopped. Otherwise this is still our responsibility.
-        mAudioSource->stop();
-    }
-    mAudioSource.clear();
-
-    mTimeSource = NULL;
-
-    delete mAudioPlayer;
-    mAudioPlayer = NULL;
-
-    if (mVideoSource != NULL) {
-        shutdownVideoDecoder_l();
-    }
-
-    mDurationUs = -1;
-    mFlags = 0;
-    mExtractorFlags = 0;
-    mTimeSourceDeltaUs = 0;
-    mVideoTimeUs = 0;
-
-    mSeeking = NO_SEEK;
-    mSeekNotificationSent = false;
-    mSeekTimeUs = 0;
-
-    mUri.setTo("");
-
-    mBitrate = -1;
-    mLastVideoTimeUs = -1;
-}
-
-void PreviewPlayer::notifyListener_l(int msg, int ext1, int ext2) {
-    if (mListener != NULL) {
-        sp<MediaPlayerBase> listener = mListener.promote();
-
-        if (listener != NULL) {
-            listener->sendEvent(msg, ext1, ext2);
-        }
-    }
-}
-
-void PreviewPlayer::onVideoLagUpdate() {
-    Mutex::Autolock autoLock(mLock);
-    if (!mVideoLagEventPending) {
-        return;
-    }
-    mVideoLagEventPending = false;
-
-    int64_t audioTimeUs = mAudioPlayer->getMediaTimeUs();
-    int64_t videoLateByUs = audioTimeUs - mVideoTimeUs;
-
-    if (!(mFlags & VIDEO_AT_EOS) && videoLateByUs > 300000ll) {
-        ALOGV("video late by %lld ms.", videoLateByUs / 1000ll);
-
-        notifyListener_l(
-                MEDIA_INFO,
-                MEDIA_INFO_VIDEO_TRACK_LAGGING,
-                videoLateByUs / 1000ll);
-    }
-
-    postVideoLagEvent_l();
-}
-
-void PreviewPlayer::notifyVideoSize_l() {
-    sp<MetaData> meta = mVideoSource->getFormat();
-
-    int32_t vWidth, vHeight;
-    int32_t cropLeft, cropTop, cropRight, cropBottom;
-
-    CHECK(meta->findInt32(kKeyWidth, &vWidth));
-    CHECK(meta->findInt32(kKeyHeight, &vHeight));
-
-    mGivenWidth = vWidth;
-    mGivenHeight = vHeight;
-
-    if (!meta->findRect(
-                kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom)) {
-
-        cropLeft = cropTop = 0;
-        cropRight = vWidth - 1;
-        cropBottom = vHeight - 1;
-
-        ALOGD("got dimensions only %d x %d", vWidth, vHeight);
-    } else {
-        ALOGD("got crop rect %d, %d, %d, %d",
-             cropLeft, cropTop, cropRight, cropBottom);
-    }
-
-    mCropRect.left = cropLeft;
-    mCropRect.right = cropRight;
-    mCropRect.top = cropTop;
-    mCropRect.bottom = cropBottom;
-
-    int32_t displayWidth;
-    if (meta->findInt32(kKeyDisplayWidth, &displayWidth)) {
-        ALOGV("Display width changed (%d=>%d)", mDisplayWidth, displayWidth);
-        mDisplayWidth = displayWidth;
-    }
-    int32_t displayHeight;
-    if (meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
-        ALOGV("Display height changed (%d=>%d)", mDisplayHeight, displayHeight);
-        mDisplayHeight = displayHeight;
-    }
-
-    int32_t usableWidth = cropRight - cropLeft + 1;
-    int32_t usableHeight = cropBottom - cropTop + 1;
-    if (mDisplayWidth != 0) {
-        usableWidth = mDisplayWidth;
-    }
-    if (mDisplayHeight != 0) {
-        usableHeight = mDisplayHeight;
-    }
-
-    int32_t rotationDegrees;
-    if (!mVideoTrack->getFormat()->findInt32(
-                kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    if (rotationDegrees == 90 || rotationDegrees == 270) {
-        notifyListener_l(
-                MEDIA_SET_VIDEO_SIZE, usableHeight, usableWidth);
-    } else {
-        notifyListener_l(
-                MEDIA_SET_VIDEO_SIZE, usableWidth, usableHeight);
-    }
-}
-
-status_t PreviewPlayer::pause() {
-    Mutex::Autolock autoLock(mLock);
-
-    mFlags &= ~CACHE_UNDERRUN;
-
-    return pause_l();
-}
-
-status_t PreviewPlayer::pause_l(bool at_eos) {
-    if (!(mFlags & PLAYING)) {
-        return OK;
-    }
-
-    cancelPlayerEvents_l();
-
-    if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-        if (at_eos) {
-            // If we played the audio stream to completion we
-            // want to make sure that all samples remaining in the audio
-            // track's queue are played out.
-            mAudioPlayer->pause(true /* playPendingSamples */);
-        } else {
-            mAudioPlayer->pause();
-        }
-
-        mFlags &= ~AUDIO_RUNNING;
-    }
-
-    mFlags &= ~PLAYING;
-    updateBatteryUsage_l();
-
-    return OK;
-}
-
-bool PreviewPlayer::isPlaying() const {
-    return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN);
-}
-
-void PreviewPlayer::setSurface(const sp<Surface> &surface) {
-    Mutex::Autolock autoLock(mLock);
-
-    mSurface = surface;
-    setNativeWindow_l(surface);
-}
-
-void PreviewPlayer::setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer) {
-    Mutex::Autolock autoLock(mLock);
-
-    mSurface.clear();
-    if (bufferProducer != NULL) {
-        setNativeWindow_l(new Surface(bufferProducer));
-    }
-}
-
-void PreviewPlayer::shutdownVideoDecoder_l() {
-    if (mVideoBuffer) {
-        mVideoBuffer->release();
-        mVideoBuffer = NULL;
-    }
-
-    mVideoSource->stop();
-
-    // The following hack is necessary to ensure that the OMX
-    // component is completely released by the time we may try
-    // to instantiate it again.
-    wp<MediaSource> tmp = mVideoSource;
-    mVideoSource.clear();
-    while (tmp.promote() != NULL) {
-        usleep(1000);
-    }
-    IPCThreadState::self()->flushCommands();
-}
-
-void PreviewPlayer::setNativeWindow_l(const sp<ANativeWindow> &native) {
-    mNativeWindow = native;
-
-    if (mVideoSource == NULL) {
-        return;
-    }
-
-    ALOGI("attempting to reconfigure to use new surface");
-
-    bool wasPlaying = (mFlags & PLAYING) != 0;
-
-    pause_l();
-
-    shutdownVideoDecoder_l();
-
-    CHECK_EQ(initVideoDecoder_l(), (status_t)OK);
-
-    if (mLastVideoTimeUs >= 0) {
-        mSeeking = SEEK;
-        mSeekNotificationSent = true;
-        mSeekTimeUs = mLastVideoTimeUs;
-        mFlags &= ~(AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS);
-    }
-
-    if (wasPlaying) {
-        play_l();
-    }
-}
-
-void PreviewPlayer::setAudioSink(
-        const sp<MediaPlayerBase::AudioSink> &audioSink) {
-    Mutex::Autolock autoLock(mLock);
-
-    mAudioSink = audioSink;
-}
-
-status_t PreviewPlayer::setLooping(bool shouldLoop) {
-    Mutex::Autolock autoLock(mLock);
-
-    mFlags = mFlags & ~LOOPING;
-
-    if (shouldLoop) {
-        mFlags |= LOOPING;
-    }
-
-    return OK;
-}
-
-void PreviewPlayer::setDuration_l(int64_t durationUs) {
-    if (mDurationUs < 0 || durationUs > mDurationUs) {
-        mDurationUs = durationUs;
-    }
-}
-
-status_t PreviewPlayer::getDuration(int64_t *durationUs) {
-    Mutex::Autolock autoLock(mLock);
-    if (mDurationUs < 0) {
-        return UNKNOWN_ERROR;
-    }
-
-    *durationUs = mDurationUs;
-    return OK;
-}
-
-status_t PreviewPlayer::getPosition(int64_t *positionUs) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mSeeking != NO_SEEK) {
-        *positionUs = mSeekTimeUs;
-    } else if (mVideoSource != NULL
-            && (mAudioPlayer == NULL || !(mFlags & VIDEO_AT_EOS))) {
-        *positionUs = mVideoTimeUs;
-    } else if (mAudioPlayer != NULL) {
-        *positionUs = mAudioPlayer->getMediaTimeUs();
-    } else {
-        *positionUs = 0;
-    }
-
-    return OK;
-}
-
-void PreviewPlayer::setPosition_l(int64_t timeUs) {
-    mVideoTimeUs = timeUs;
-}
-
-status_t PreviewPlayer::seekTo_l(int64_t timeUs) {
-    ALOGV("seekTo_l");
-    if (mFlags & CACHE_UNDERRUN) {
-        mFlags &= ~CACHE_UNDERRUN;
-        play_l();
-    }
-
-    if ((mFlags & PLAYING) && mVideoSource != NULL && (mFlags & VIDEO_AT_EOS)) {
-        // Video playback completed before, there's no pending
-        // video event right now. In order for this new seek
-        // to be honored, we need to post one.
-
-        postVideoEvent_l();
-    }
-
-    mSeeking = SEEK;
-    mSeekNotificationSent = false;
-    mSeekTimeUs = timeUs;
-    mFlags &= ~(AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS);
-
-    seekAudioIfNecessary_l();
-
-    if (!(mFlags & PLAYING)) {
-        ALOGV("seeking while paused, sending SEEK_COMPLETE notification"
-             " immediately.");
-
-        notifyListener_l(MEDIA_SEEK_COMPLETE);
-        mSeekNotificationSent = true;
-
-        if ((mFlags & PREPARED) && mVideoSource != NULL) {
-            mFlags |= SEEK_PREVIEW;
-            postVideoEvent_l();
-        }
-    }
-
-    return OK;
-}
-
-void PreviewPlayer::seekAudioIfNecessary_l() {
-    if (mSeeking != NO_SEEK && mVideoSource == NULL && mAudioPlayer != NULL) {
-        mAudioPlayer->seekTo(mSeekTimeUs);
-
-        mWatchForAudioSeekComplete = true;
-        mWatchForAudioEOS = true;
-    }
-}
-
-void PreviewPlayer::setAudioSource(const sp<MediaSource>& source) {
-    CHECK(source != NULL);
-    mAudioTrack = source;
-}
-
-void PreviewPlayer::setVideoSource(const sp<MediaSource>& source) {
-    CHECK(source != NULL);
-    mVideoTrack = source;
-}
-
-void PreviewPlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
-    if (mSeeking == SEEK_VIDEO_ONLY) {
-        mSeeking = NO_SEEK;
-        return;
-    }
-
-    if (mSeeking == NO_SEEK || (mFlags & SEEK_PREVIEW)) {
-        return;
-    }
-
-    if (mAudioPlayer != NULL) {
-        ALOGV("seeking audio to %lld us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
-
-        // If we don't have a video time, seek audio to the originally
-        // requested seek time instead.
-
-        mAudioPlayer->seekTo(videoTimeUs < 0 ? mSeekTimeUs : videoTimeUs);
-        mWatchForAudioSeekComplete = true;
-        mWatchForAudioEOS = true;
-    } else if (!mSeekNotificationSent) {
-        // If we're playing video only, report seek complete now,
-        // otherwise audio player will notify us later.
-        notifyListener_l(MEDIA_SEEK_COMPLETE);
-        mSeekNotificationSent = true;
-    }
-
-    mFlags |= FIRST_FRAME;
-    mSeeking = NO_SEEK;
-}
-
-void PreviewPlayer::onCheckAudioStatus() {
-    Mutex::Autolock autoLock(mLock);
-    if (!mAudioStatusEventPending) {
-        // Event was dispatched and while we were blocking on the mutex,
-        // has already been cancelled.
-        return;
-    }
-
-    mAudioStatusEventPending = false;
-
-    if (mWatchForAudioSeekComplete && !mAudioPlayer->isSeeking()) {
-        mWatchForAudioSeekComplete = false;
-
-        if (!mSeekNotificationSent) {
-            notifyListener_l(MEDIA_SEEK_COMPLETE);
-            mSeekNotificationSent = true;
-        }
-
-        mSeeking = NO_SEEK;
-    }
-
-    status_t finalStatus;
-    if (mWatchForAudioEOS && mAudioPlayer->reachedEOS(&finalStatus)) {
-        mWatchForAudioEOS = false;
-        mFlags |= AUDIO_AT_EOS;
-        mFlags |= FIRST_FRAME;
-        postStreamDoneEvent_l(finalStatus);
-    }
-}
-
-void PreviewPlayer::postVideoEvent_l(int64_t delayUs) {
-    if (mVideoEventPending) {
-        return;
-    }
-
-    mVideoEventPending = true;
-    mQueue.postEventWithDelay(mVideoEvent, delayUs < 0 ? 10000 : delayUs);
-}
-
-void PreviewPlayer::postStreamDoneEvent_l(status_t status) {
-    if (mStreamDoneEventPending) {
-        return;
-    }
-    mStreamDoneEventPending = true;
-
-    mStreamDoneStatus = status;
-    mQueue.postEvent(mStreamDoneEvent);
-}
-
-void PreviewPlayer::postVideoLagEvent_l() {
-    if (mVideoLagEventPending) {
-        return;
-    }
-    mVideoLagEventPending = true;
-    mQueue.postEventWithDelay(mVideoLagEvent, 1000000ll);
-}
-
-void PreviewPlayer::postCheckAudioStatusEvent_l(int64_t delayUs) {
-    if (mAudioStatusEventPending) {
-        return;
-    }
-    mAudioStatusEventPending = true;
-    mQueue.postEventWithDelay(mCheckAudioStatusEvent, delayUs);
-}
-
-void PreviewPlayer::abortPrepare(status_t err) {
-    CHECK(err != OK);
-
-    if (mIsAsyncPrepare) {
-        notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
-    }
-
-    mPrepareResult = err;
-    mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
-    mAsyncPrepareEvent = NULL;
-    mPreparedCondition.broadcast();
-}
-
-uint32_t PreviewPlayer::getSourceSeekFlags() const {
-    Mutex::Autolock lock(mLock);
-    return mExtractorFlags;
-}
-
-void PreviewPlayer::postAudioEOS(int64_t delayUs) {
-    Mutex::Autolock autoLock(mLock);
-    postCheckAudioStatusEvent_l(delayUs);
-}
-
-void PreviewPlayer::postAudioSeekComplete() {
-    Mutex::Autolock autoLock(mLock);
-    postCheckAudioStatusEvent_l(0 /* delayUs */);
-}
-
-void PreviewPlayer::updateBatteryUsage_l() {
-    uint32_t params = IMediaPlayerService::kBatteryDataTrackDecoder;
-    if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
-        params |= IMediaPlayerService::kBatteryDataTrackAudio;
-    }
-    if (mVideoSource != NULL) {
-        params |= IMediaPlayerService::kBatteryDataTrackVideo;
-    }
-    addBatteryData(params);
-}
-
-}  // namespace android
diff --git a/libvideoeditor/lvpp/PreviewPlayer.h b/libvideoeditor/lvpp/PreviewPlayer.h
deleted file mode 100755
index 5a13b58..0000000
--- a/libvideoeditor/lvpp/PreviewPlayer.h
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef PREVIEW_PLAYER_H_
-
-#define PREVIEW_PLAYER_H_
-
-#include "TimedEventQueue.h"
-#include "VideoEditorAudioPlayer.h"
-
-#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/TimeSource.h>
-#include <utils/threads.h>
-#include "NativeWindowRenderer.h"
-
-namespace android {
-
-struct VideoEditorAudioPlayer;
-struct MediaExtractor;
-
-struct PreviewPlayer {
-    PreviewPlayer(NativeWindowRenderer* renderer);
-    ~PreviewPlayer();
-
-    void setListener(const wp<MediaPlayerBase> &listener);
-    void reset();
-
-    status_t play();
-    status_t pause();
-
-    bool isPlaying() const;
-    void setSurface(const sp<Surface> &surface);
-    void setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer);
-    status_t seekTo(int64_t timeUs);
-
-    status_t getVideoDimensions(int32_t *width, int32_t *height) const;
-
-
-    // FIXME: Sync between ...
-    void acquireLock();
-    void releaseLock();
-
-    status_t prepare();
-    status_t prepareAsync();
-    status_t setDataSource(const char *path);
-    status_t setDataSource(const sp<IStreamSource> &source);
-
-    void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);
-    status_t setLooping(bool shouldLoop);
-    status_t getDuration(int64_t *durationUs);
-    status_t getPosition(int64_t *positionUs);
-
-    uint32_t getSourceSeekFlags() const;
-
-    void postAudioEOS(int64_t delayUs = 0ll);
-    void postAudioSeekComplete();
-
-    status_t loadEffectsSettings(M4VSS3GPP_EffectSettings* pEffectSettings,
-                                 int nEffects);
-    status_t loadAudioMixSettings(M4xVSS_AudioMixingSettings* pAudioMixSettings);
-    status_t setAudioMixPCMFileHandle(M4OSA_Context pAudioMixPCMFileHandle);
-    status_t setAudioMixStoryBoardParam(M4OSA_UInt32 audioMixStoryBoardTS,
-                            M4OSA_UInt32 currentMediaBeginCutTime,
-                            M4OSA_UInt32 currentMediaVolumeVol);
-
-    status_t setPlaybackBeginTime(uint32_t msec);
-    status_t setPlaybackEndTime(uint32_t msec);
-    status_t setStoryboardStartTime(uint32_t msec);
-    status_t setProgressCallbackInterval(uint32_t cbInterval);
-    status_t setMediaRenderingMode(M4xVSS_MediaRendering mode,
-                            M4VIDEOEDITING_VideoFrameSize outputVideoSize);
-
-    status_t resetJniCallbackTimeStamp();
-    status_t setImageClipProperties(uint32_t width, uint32_t height);
-    status_t readFirstVideoFrame();
-    status_t getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs);
-    status_t setAudioPlayer(VideoEditorAudioPlayer *audioPlayer);
-
-private:
-    enum {
-        PLAYING             = 1,
-        LOOPING             = 2,
-        FIRST_FRAME         = 4,
-        PREPARING           = 8,
-        PREPARED            = 16,
-        AT_EOS              = 32,
-        PREPARE_CANCELLED   = 64,
-        CACHE_UNDERRUN      = 128,
-        AUDIO_AT_EOS        = 256,
-        VIDEO_AT_EOS        = 512,
-        AUTO_LOOPING        = 1024,
-        INFORMED_AV_EOS     = 2048,
-
-        // We are basically done preparing but are currently buffering
-        // sufficient data to begin playback and finish the preparation phase
-        // for good.
-        PREPARING_CONNECTED = 2048,
-
-        // We're triggering a single video event to display the first frame
-        // after the seekpoint.
-        SEEK_PREVIEW        = 4096,
-
-        AUDIO_RUNNING       = 8192,
-        AUDIOPLAYER_STARTED = 16384,
-
-        INCOGNITO           = 32768,
-    };
-
-    mutable Mutex mLock;
-
-    OMXClient mClient;
-    TimedEventQueue mQueue;
-    bool mQueueStarted;
-    wp<MediaPlayerBase> mListener;
-
-    sp<Surface> mSurface;
-    sp<ANativeWindow> mNativeWindow;
-    sp<MediaPlayerBase::AudioSink> mAudioSink;
-
-    SystemTimeSource mSystemTimeSource;
-    TimeSource *mTimeSource;
-
-    String8 mUri;
-
-    sp<MediaSource> mVideoTrack;
-    sp<MediaSource> mVideoSource;
-    bool mVideoRendererIsPreview;
-
-    sp<MediaSource> mAudioTrack;
-    sp<MediaSource> mAudioSource;
-    VideoEditorAudioPlayer *mAudioPlayer;
-    int64_t mDurationUs;
-
-    int32_t mDisplayWidth;
-    int32_t mDisplayHeight;
-
-    uint32_t mFlags;
-    uint32_t mExtractorFlags;
-
-    int64_t mTimeSourceDeltaUs;
-    int64_t mVideoTimeUs;
-
-    enum SeekType {
-        NO_SEEK,
-        SEEK,
-        SEEK_VIDEO_ONLY
-    };
-    SeekType mSeeking;
-
-    bool mSeekNotificationSent;
-    int64_t mSeekTimeUs;
-
-    int64_t mBitrate;  // total bitrate of the file (in bps) or -1 if unknown.
-
-    bool mWatchForAudioSeekComplete;
-    bool mWatchForAudioEOS;
-
-    sp<TimedEventQueue::Event> mVideoEvent;
-    bool mVideoEventPending;
-    sp<TimedEventQueue::Event> mStreamDoneEvent;
-    bool mStreamDoneEventPending;
-    sp<TimedEventQueue::Event> mCheckAudioStatusEvent;
-    bool mAudioStatusEventPending;
-    sp<TimedEventQueue::Event> mVideoLagEvent;
-    bool mVideoLagEventPending;
-
-    sp<TimedEventQueue::Event> mAsyncPrepareEvent;
-    Condition mPreparedCondition;
-    bool mIsAsyncPrepare;
-    status_t mPrepareResult;
-    status_t mStreamDoneStatus;
-
-    MediaBuffer *mVideoBuffer;
-    int64_t mLastVideoTimeUs;
-    ARect mCropRect;
-    int32_t mGivenWidth, mGivenHeight;
-
-
-    bool mIsChangeSourceRequired;
-
-    NativeWindowRenderer *mNativeWindowRenderer;
-    RenderInput *mVideoRenderer;
-
-    int32_t mVideoWidth, mVideoHeight;
-
-    //Data structures used for audio and video effects
-    M4VSS3GPP_EffectSettings* mEffectsSettings;
-    M4xVSS_AudioMixingSettings* mPreviewPlayerAudioMixSettings;
-    M4OSA_Context mAudioMixPCMFileHandle;
-    M4OSA_UInt32 mAudioMixStoryBoardTS;
-    M4OSA_UInt32 mCurrentMediaBeginCutTime;
-    M4OSA_UInt32 mCurrentMediaVolumeValue;
-    M4OSA_UInt32 mCurrFramingEffectIndex;
-
-    uint32_t mNumberEffects;
-    uint32_t mPlayBeginTimeMsec;
-    uint32_t mPlayEndTimeMsec;
-    uint64_t mDecodedVideoTs; // timestamp of current decoded video frame buffer
-    uint64_t mDecVideoTsStoryBoard; // timestamp of frame relative to storyboard
-    uint32_t mCurrentVideoEffect;
-    uint32_t mProgressCbInterval;
-    uint32_t mNumberDecVideoFrames; // Counter of number of video frames decoded
-    sp<TimedEventQueue::Event> mProgressCbEvent;
-    bool mProgressCbEventPending;
-    sp<TimedEventQueue::Event> mOverlayUpdateEvent;
-    bool mOverlayUpdateEventPending;
-    bool mOverlayUpdateEventPosted;
-
-    M4xVSS_MediaRendering mRenderingMode;
-    uint32_t mOutputVideoWidth;
-    uint32_t mOutputVideoHeight;
-
-    uint32_t mStoryboardStartTimeMsec;
-
-    bool mIsVideoSourceJpg;
-    bool mIsFiftiesEffectStarted;
-    int64_t mImageFrameTimeUs;
-    bool mStartNextPlayer;
-    mutable Mutex mLockControl;
-
-    M4VIFI_UInt8*  mFrameRGBBuffer;
-    M4VIFI_UInt8*  mFrameYUVBuffer;
-
-    void cancelPlayerEvents_l(bool updateProgressCb = false);
-    status_t setDataSource_l(const sp<MediaExtractor> &extractor);
-    status_t setDataSource_l(const char *path);
-    void setNativeWindow_l(const sp<ANativeWindow> &native);
-    void reset_l();
-    void clear_l();
-    status_t play_l();
-    status_t pause_l(bool at_eos = false);
-    status_t initRenderer_l();
-    status_t initAudioDecoder_l();
-    status_t initVideoDecoder_l(uint32_t flags = 0);
-    void notifyVideoSize_l();
-    void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0);
-    void onVideoEvent();
-    void onVideoLagUpdate();
-    void onStreamDone();
-    void onCheckAudioStatus();
-    void onPrepareAsyncEvent();
-
-    void finishAsyncPrepare_l();
-    void abortPrepare(status_t err);
-
-    status_t startAudioPlayer_l();
-    void setVideoSource(const sp<MediaSource>& source);
-    status_t finishSetDataSource_l();
-    void setAudioSource(const sp<MediaSource>& source);
-
-    status_t seekTo_l(int64_t timeUs);
-    void seekAudioIfNecessary_l();
-    void finishSeekIfNecessary(int64_t videoTimeUs);
-
-    void postCheckAudioStatusEvent_l(int64_t delayUs);
-    void postVideoLagEvent_l();
-    void postStreamDoneEvent_l(status_t status);
-    void postVideoEvent_l(int64_t delayUs = -1);
-    void setVideoPostProcessingNode(
-                    M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable);
-    void postProgressCallbackEvent_l();
-    void shutdownVideoDecoder_l();
-    void onProgressCbEvent();
-
-    void postOverlayUpdateEvent_l();
-    void onUpdateOverlayEvent();
-
-    status_t setDataSource_l_jpg();
-    status_t prepare_l();
-    status_t prepareAsync_l();
-    void updateBatteryUsage_l();
-    void updateSizeToRender(sp<MetaData> meta);
-
-    void setDuration_l(int64_t durationUs);
-    void setPosition_l(int64_t timeUs);
-
-    PreviewPlayer(const PreviewPlayer &);
-    PreviewPlayer &operator=(const PreviewPlayer &);
-};
-
-}  // namespace android
-
-#endif  // PREVIEW_PLAYER_H_
-
diff --git a/libvideoeditor/lvpp/PreviewRenderer.cpp b/libvideoeditor/lvpp/PreviewRenderer.cpp
deleted file mode 100755
index b1cfc8e..0000000
--- a/libvideoeditor/lvpp/PreviewRenderer.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#define LOG_NDEBUG 1
-#define LOG_TAG "PreviewRenderer"
-#include <utils/Log.h>
-
-#include "PreviewRenderer.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <gui/Surface.h>
-
-namespace android {
-
-PreviewRenderer* PreviewRenderer::CreatePreviewRenderer (
-        const sp<Surface> &surface, size_t width, size_t height) {
-
-    PreviewRenderer* renderer = new PreviewRenderer(surface, width, height);
-
-    if (renderer->init() != 0) {
-        delete renderer;
-        return NULL;
-    }
-
-    return renderer;
-}
-
-PreviewRenderer::PreviewRenderer(
-        const sp<Surface> &surface,
-        size_t width, size_t height)
-    : mSurface(surface),
-      mWidth(width),
-      mHeight(height) {
-}
-
-int PreviewRenderer::init() {
-    int err = 0;
-    ANativeWindow* anw = mSurface.get();
-
-    err = native_window_api_connect(anw, NATIVE_WINDOW_API_CPU);
-    if (err) goto fail;
-
-    err = native_window_set_usage(
-            anw, GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN);
-    if (err) goto fail;
-
-    err = native_window_set_buffer_count(anw, 3);
-    if (err) goto fail;
-
-    err = native_window_set_scaling_mode(
-            anw, NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-    if (err) goto fail;
-
-    err = native_window_set_buffers_geometry(
-            anw, mWidth, mHeight, HAL_PIXEL_FORMAT_YV12);
-    if (err) goto fail;
-
-    err = native_window_set_buffers_transform(anw, 0);
-    if (err) goto fail;
-
-fail:
-    return err;
-}
-
-PreviewRenderer::~PreviewRenderer() {
-    native_window_api_disconnect(mSurface.get(), NATIVE_WINDOW_API_CPU);
-}
-
-
-//
-// Provides a buffer and associated stride
-// This buffer is allocated by the SurfaceFlinger
-//
-// For optimal display performances, you should :
-// 1) call getBufferYV12()
-// 2) fill the buffer with your data
-// 3) call renderYV12() to take these changes into account
-//
-// For each call to getBufferYV12(), you must also call renderYV12()
-// Expected format in the buffer is YV12 formats (similar to YUV420 planar fromat)
-// for more details on this YV12 cf hardware/libhardware/include/hardware/hardware.h
-//
-void PreviewRenderer::getBufferYV12(uint8_t **data, size_t *stride) {
-    int err = OK;
-
-    if ((err = native_window_dequeue_buffer_and_wait(mSurface.get(),
-            &mBuf)) != 0) {
-        ALOGW("native_window_dequeue_buffer_and_wait returned error %d", err);
-        return;
-    }
-
-    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
-
-    Rect bounds(mWidth, mHeight);
-
-    void *dst;
-    CHECK_EQ(0, mapper.lock(mBuf->handle,
-            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
-            bounds, &dst));
-
-    *data   = (uint8_t*)dst;
-    *stride = mBuf->stride;
-}
-
-
-//
-// Display the content of the buffer provided by last call to getBufferYV12()
-//
-// See getBufferYV12() for details.
-//
-void PreviewRenderer::renderYV12() {
-    int err = OK;
-
-    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
-
-    if (mBuf!= NULL) {
-        CHECK_EQ(0, mapper.unlock(mBuf->handle));
-
-        if ((err = mSurface->ANativeWindow::queueBuffer(mSurface.get(), mBuf, -1)) != 0) {
-            ALOGW("Surface::queueBuffer returned error %d", err);
-        }
-    }
-    mBuf = NULL;
-}
-
-}  // namespace android
diff --git a/libvideoeditor/lvpp/PreviewRenderer.h b/libvideoeditor/lvpp/PreviewRenderer.h
deleted file mode 100755
index ce28276..0000000
--- a/libvideoeditor/lvpp/PreviewRenderer.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef PREVIEW_RENDERER_H_
-
-#define PREVIEW_RENDERER_H_
-
-#include <media/stagefright/ColorConverter.h>
-#include <utils/RefBase.h>
-#include <system/window.h>
-#include <ui/GraphicBufferMapper.h>
-
-
-namespace android {
-
-class Surface;
-
-class PreviewRenderer {
-public:
-
-static PreviewRenderer* CreatePreviewRenderer (
-        const sp<Surface> &surface,
-        size_t width, size_t height);
-
-    ~PreviewRenderer();
-
-    void getBufferYV12(uint8_t **data, size_t *stride);
-
-    void renderYV12();
-
-    static size_t ALIGN(size_t x, size_t alignment) {
-        return (x + alignment - 1) & ~(alignment - 1);
-    }
-
-private:
-    PreviewRenderer(
-            const sp<Surface> &surface,
-            size_t width, size_t height);
-
-    int init();
-
-    sp<Surface> mSurface;
-    size_t mWidth, mHeight;
-
-    ANativeWindowBuffer *mBuf;
-
-    PreviewRenderer(const PreviewRenderer &);
-    PreviewRenderer &operator=(const PreviewRenderer &);
-};
-
-}  // namespace android
-
-#endif  // PREVIEW_RENDERER_H_
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
deleted file mode 100755
index 91dc590..0000000
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
+++ /dev/null
@@ -1,900 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <inttypes.h>
-
-#define LOG_NDEBUG 1
-#define LOG_TAG "VideoEditorAudioPlayer"
-#include <utils/Log.h>
-
-#include <binder/IPCThreadState.h>
-#include <media/AudioTrack.h>
-#include <VideoEditorAudioPlayer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-#include <system/audio.h>
-
-#include "PreviewPlayer.h"
-namespace android {
-
-VideoEditorAudioPlayer::VideoEditorAudioPlayer(
-        const sp<MediaPlayerBase::AudioSink> &audioSink,
-        PreviewPlayer *observer)
-    : mInputBuffer(NULL),
-      mSampleRate(0),
-      mLatencyUs(0),
-      mFrameSize(0),
-      mNumFramesPlayed(0),
-      mPositionTimeMediaUs(-1),
-      mPositionTimeRealUs(-1),
-      mSeeking(false),
-      mReachedEOS(false),
-      mFinalStatus(OK),
-      mStarted(false),
-      mIsFirstBuffer(false),
-      mFirstBufferResult(OK),
-      mFirstBuffer(NULL),
-      mAudioSink(audioSink),
-      mObserver(observer) {
-
-    ALOGV("Constructor");
-    mBGAudioPCMFileHandle = NULL;
-    mAudioProcess = NULL;
-    mBGAudioPCMFileLength = 0;
-    mBGAudioPCMFileTrimmedLength = 0;
-    mBGAudioPCMFileDuration = 0;
-    mBGAudioPCMFileSeekPoint = 0;
-    mBGAudioPCMFileOriginalSeekPoint = 0;
-    mBGAudioStoryBoardSkimTimeStamp = 0;
-    mBGAudioStoryBoardCurrentMediaBeginCutTS = 0;
-    mBGAudioStoryBoardCurrentMediaVolumeVal = 0;
-    mSeekTimeUs = 0;
-    mSource = NULL;
-}
-
-VideoEditorAudioPlayer::~VideoEditorAudioPlayer() {
-
-    ALOGV("Destructor");
-    if (mStarted) {
-        reset();
-    }
-    if (mAudioProcess != NULL) {
-        delete mAudioProcess;
-        mAudioProcess = NULL;
-    }
-}
-
-void VideoEditorAudioPlayer::pause(bool playPendingSamples) {
-    ALOGV("pause: playPendingSamples=%d", playPendingSamples);
-    CHECK(mStarted);
-
-    if (playPendingSamples) {
-        if (mAudioSink.get() != NULL) {
-            mAudioSink->stop();
-        } else {
-            mAudioTrack->stop();
-        }
-    } else {
-        if (mAudioSink.get() != NULL) {
-            mAudioSink->pause();
-        } else {
-            mAudioTrack->pause();
-        }
-    }
-}
-
-void VideoEditorAudioPlayer::clear() {
-    ALOGV("clear");
-    if (!mStarted) {
-        return;
-    }
-
-    if (mAudioSink.get() != NULL) {
-        mAudioSink->stop();
-        mAudioSink->close();
-    } else {
-        mAudioTrack->stop();
-
-        mAudioTrack.clear();
-    }
-
-    // Make sure to release any buffer we hold onto so that the
-    // source is able to stop().
-
-    if (mFirstBuffer != NULL) {
-        mFirstBuffer->release();
-        mFirstBuffer = NULL;
-    }
-
-    if (mInputBuffer != NULL) {
-        ALOGV("AudioPlayerBase releasing input buffer.");
-
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-    }
-
-    mSource->stop();
-
-    // The following hack is necessary to ensure that the OMX
-    // component is completely released by the time we may try
-    // to instantiate it again.
-    wp<MediaSource> tmp = mSource;
-    mSource.clear();
-    while (tmp.promote() != NULL) {
-        usleep(1000);
-    }
-    IPCThreadState::self()->flushCommands();
-
-    mNumFramesPlayed = 0;
-    mPositionTimeMediaUs = -1;
-    mPositionTimeRealUs = -1;
-    mSeeking = false;
-    mReachedEOS = false;
-    mFinalStatus = OK;
-    mStarted = false;
-}
-
-status_t VideoEditorAudioPlayer::resume() {
-    ALOGV("resume");
-
-    AudioMixSettings audioMixSettings;
-
-    // Single audio player is used;
-    // Pass on the audio ducking parameters
-    // which might have changed with new audio source
-    audioMixSettings.lvInDucking_threshold =
-        mAudioMixSettings->uiInDucking_threshold;
-    audioMixSettings.lvInDucking_lowVolume =
-        ((M4OSA_Float)mAudioMixSettings->uiInDucking_lowVolume) / 100.0;
-    audioMixSettings.lvInDucking_enable =
-        mAudioMixSettings->bInDucking_enable;
-    audioMixSettings.lvPTVolLevel =
-        ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal) / 100.0;
-    audioMixSettings.lvBTVolLevel =
-        ((M4OSA_Float)mAudioMixSettings->uiAddVolume) / 100.0;
-    audioMixSettings.lvBTChannelCount = mAudioMixSettings->uiBTChannelCount;
-    audioMixSettings.lvPTChannelCount = mAudioMixSettings->uiNbChannels;
-
-    // Call to Audio mix param setting
-    mAudioProcess->setMixParams(audioMixSettings);
-
-    CHECK(mStarted);
-
-    if (mAudioSink.get() != NULL) {
-        mAudioSink->start();
-    } else {
-        mAudioTrack->start();
-    }
-    return OK;
-}
-
-status_t VideoEditorAudioPlayer::seekTo(int64_t time_us) {
-    ALOGV("seekTo: %lld", time_us);
-    Mutex::Autolock autoLock(mLock);
-
-    mSeeking = true;
-    mPositionTimeRealUs = mPositionTimeMediaUs = -1;
-    mReachedEOS = false;
-    mSeekTimeUs = time_us;
-
-    if (mAudioSink != NULL) {
-        mAudioSink->flush();
-    } else {
-        mAudioTrack->flush();
-    }
-
-    return OK;
-}
-
-bool VideoEditorAudioPlayer::isSeeking() {
-    Mutex::Autolock lock(mLock);
-    ALOGV("isSeeking: mSeeking=%d", mSeeking);
-    return mSeeking;
-}
-
-bool VideoEditorAudioPlayer::reachedEOS(status_t *finalStatus) {
-    ALOGV("reachedEOS: status=%d", mFinalStatus);
-    *finalStatus = OK;
-
-    Mutex::Autolock autoLock(mLock);
-    *finalStatus = mFinalStatus;
-    return mReachedEOS;
-}
-
-int64_t VideoEditorAudioPlayer::getRealTimeUs() {
-    Mutex::Autolock autoLock(mLock);
-    return getRealTimeUs_l();
-}
-
-int64_t VideoEditorAudioPlayer::getRealTimeUs_l() {
-    return -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate;
-}
-
-int64_t VideoEditorAudioPlayer::getMediaTimeUs() {
-    ALOGV("getMediaTimeUs");
-    Mutex::Autolock autoLock(mLock);
-
-    if (mPositionTimeMediaUs < 0 || mPositionTimeRealUs < 0) {
-        if (mSeeking) {
-            return mSeekTimeUs;
-        }
-
-        return 0;
-    }
-
-    int64_t realTimeOffset = getRealTimeUs_l() - mPositionTimeRealUs;
-    if (realTimeOffset < 0) {
-        realTimeOffset = 0;
-    }
-
-    return mPositionTimeMediaUs + realTimeOffset;
-}
-
-bool VideoEditorAudioPlayer::getMediaTimeMapping(
-        int64_t *realtime_us, int64_t *mediatime_us) {
-    ALOGV("getMediaTimeMapping");
-    Mutex::Autolock autoLock(mLock);
-
-    *realtime_us = mPositionTimeRealUs;
-    *mediatime_us = mPositionTimeMediaUs;
-
-    return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
-}
-
-void VideoEditorAudioPlayer::setSource(const sp<MediaSource> &source) {
-    Mutex::Autolock autoLock(mLock);
-
-    // Before setting source, stop any existing source.
-    // Make sure to release any buffer we hold onto so that the
-    // source is able to stop().
-
-    if (mFirstBuffer != NULL) {
-        mFirstBuffer->release();
-        mFirstBuffer = NULL;
-    }
-
-    if (mInputBuffer != NULL) {
-        ALOGV("VideoEditorAudioPlayer releasing input buffer.");
-
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-    }
-
-    if (mSource != NULL) {
-        mSource->stop();
-        mSource.clear();
-    }
-
-    mSource = source;
-    mReachedEOS = false;
-}
-
-sp<MediaSource> VideoEditorAudioPlayer::getSource() {
-    Mutex::Autolock autoLock(mLock);
-    return mSource;
-}
-
-void VideoEditorAudioPlayer::setObserver(PreviewPlayer *observer) {
-    ALOGV("setObserver");
-    //CHECK(!mStarted);
-    mObserver = observer;
-}
-
-bool VideoEditorAudioPlayer::isStarted() {
-    return mStarted;
-}
-
-// static
-void VideoEditorAudioPlayer::AudioCallback(int event, void *user, void *info) {
-    static_cast<VideoEditorAudioPlayer *>(user)->AudioCallback(event, info);
-}
-
-
-void VideoEditorAudioPlayer::AudioCallback(int event, void *info) {
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
-    }
-
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-    size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
-
-    buffer->size = numBytesWritten;
-}
-
-status_t VideoEditorAudioPlayer::start(bool sourceAlreadyStarted) {
-    Mutex::Autolock autoLock(mLock);
-    CHECK(!mStarted);
-    CHECK(mSource != NULL);
-    ALOGV("Start");
-    status_t err;
-    M4OSA_ERR result = M4NO_ERROR;
-    M4OSA_UInt32 startTime = 0;
-    M4OSA_UInt32 seekTimeStamp = 0;
-    M4OSA_Bool bStoryBoardTSBeyondBTEndCutTime = M4OSA_FALSE;
-
-    if (!sourceAlreadyStarted) {
-        err = mSource->start();
-        if (err != OK) {
-            return err;
-        }
-    }
-
-    // Create the BG Audio handler
-    mAudioProcess = new VideoEditorBGAudioProcessing();
-    AudioMixSettings audioMixSettings;
-
-    // Pass on the audio ducking parameters
-    audioMixSettings.lvInDucking_threshold =
-        mAudioMixSettings->uiInDucking_threshold;
-    audioMixSettings.lvInDucking_lowVolume =
-        ((M4OSA_Float)mAudioMixSettings->uiInDucking_lowVolume) / 100.0;
-    audioMixSettings.lvInDucking_enable =
-        mAudioMixSettings->bInDucking_enable;
-    audioMixSettings.lvPTVolLevel =
-        ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal) / 100.0;
-    audioMixSettings.lvBTVolLevel =
-        ((M4OSA_Float)mAudioMixSettings->uiAddVolume) / 100.0;
-    audioMixSettings.lvBTChannelCount = mAudioMixSettings->uiBTChannelCount;
-    audioMixSettings.lvPTChannelCount = mAudioMixSettings->uiNbChannels;
-
-    // Call to Audio mix param setting
-    mAudioProcess->setMixParams(audioMixSettings);
-
-    // Get the BG Audio PCM file details
-    if ( mBGAudioPCMFileHandle ) {
-
-        // TODO : 32bits required for OSAL, to be updated once OSAL is updated
-        M4OSA_UInt32 tmp32 = 0;
-        result = M4OSA_fileReadGetOption(mBGAudioPCMFileHandle,
-                                        M4OSA_kFileReadGetFileSize,
-                                        (M4OSA_Void**)&tmp32);
-        mBGAudioPCMFileLength = tmp32;
-        mBGAudioPCMFileTrimmedLength = mBGAudioPCMFileLength;
-
-
-        ALOGV("VideoEditorAudioPlayer::start M4OSA_kFileReadGetFileSize = %lld",
-                            mBGAudioPCMFileLength);
-
-        // Get the duration in time of the audio BT
-        if ( result == M4NO_ERROR ) {
-         ALOGV("VEAP: channels = %" PRIu32 " freq = %" PRIu32,
-         mAudioMixSettings->uiNbChannels,  mAudioMixSettings->uiSamplingFrequency);
-
-            // No trim
-            mBGAudioPCMFileDuration = ((
-                    (int64_t)(mBGAudioPCMFileLength/sizeof(M4OSA_UInt16)/
-                    mAudioMixSettings->uiNbChannels))*1000 ) /
-                    mAudioMixSettings->uiSamplingFrequency;
-
-            ALOGV("VideoEditorAudioPlayer:: beginCutMs %d , endCutMs %d",
-                    (unsigned int) mAudioMixSettings->beginCutMs,
-                    (unsigned int) mAudioMixSettings->endCutMs);
-
-            // Remove the trim part
-            if ((mAudioMixSettings->beginCutMs == 0) &&
-                (mAudioMixSettings->endCutMs != 0)) {
-                // End time itself the file duration
-                mBGAudioPCMFileDuration = mAudioMixSettings->endCutMs;
-                // Limit the file length also
-                mBGAudioPCMFileTrimmedLength = ((
-                     (int64_t)(mBGAudioPCMFileDuration *
-                     mAudioMixSettings->uiSamplingFrequency) *
-                     mAudioMixSettings->uiNbChannels) *
-                     sizeof(M4OSA_UInt16)) / 1000;
-            }
-            else if ((mAudioMixSettings->beginCutMs != 0) &&
-                     (mAudioMixSettings->endCutMs == mBGAudioPCMFileDuration)) {
-                // End time itself the file duration
-                mBGAudioPCMFileDuration = mBGAudioPCMFileDuration -
-                      mAudioMixSettings->beginCutMs;
-                // Limit the file length also
-                mBGAudioPCMFileTrimmedLength = ((
-                     (int64_t)(mBGAudioPCMFileDuration *
-                     mAudioMixSettings->uiSamplingFrequency) *
-                     mAudioMixSettings->uiNbChannels) *
-                     sizeof(M4OSA_UInt16)) / 1000;
-            }
-            else if ((mAudioMixSettings->beginCutMs != 0) &&
-                    (mAudioMixSettings->endCutMs != 0)) {
-                // End time itself the file duration
-                mBGAudioPCMFileDuration = mAudioMixSettings->endCutMs -
-                    mAudioMixSettings->beginCutMs;
-                // Limit the file length also
-                mBGAudioPCMFileTrimmedLength = ((
-                    (int64_t)(mBGAudioPCMFileDuration *
-                    mAudioMixSettings->uiSamplingFrequency) *
-                    mAudioMixSettings->uiNbChannels) *
-                    sizeof(M4OSA_UInt16)) / 1000; /*make to sec from ms*/
-            }
-
-            ALOGV("VideoEditorAudioPlayer: file duration recorded : %lld",
-                    mBGAudioPCMFileDuration);
-        }
-
-        // Last played location to be seeked at for next media item
-        if ( result == M4NO_ERROR ) {
-            ALOGV("VideoEditorAudioPlayer::mBGAudioStoryBoardSkimTimeStamp %lld",
-                    mBGAudioStoryBoardSkimTimeStamp);
-            ALOGV("VideoEditorAudioPlayer::uiAddCts %d",
-                    mAudioMixSettings->uiAddCts);
-            if (mBGAudioStoryBoardSkimTimeStamp >= mAudioMixSettings->uiAddCts) {
-                startTime = (mBGAudioStoryBoardSkimTimeStamp -
-                 mAudioMixSettings->uiAddCts);
-            }
-            else {
-                // do nothing
-            }
-
-            ALOGV("VideoEditorAudioPlayer::startTime %" PRIu32, startTime);
-            seekTimeStamp = 0;
-            if (startTime) {
-                if (startTime >= mBGAudioPCMFileDuration) {
-                    // The BG track should be looped and started again
-                    if (mAudioMixSettings->bLoop) {
-                        // Add begin cut time to the mod value
-                        seekTimeStamp = ((startTime%mBGAudioPCMFileDuration) +
-                        mAudioMixSettings->beginCutMs);
-                    }else {
-                        // Looping disabled, donot do BT Mix , set to file end
-                        seekTimeStamp = (mBGAudioPCMFileDuration +
-                        mAudioMixSettings->beginCutMs);
-                    }
-                }else {
-                    // BT still present , just seek to story board time
-                    seekTimeStamp = startTime + mAudioMixSettings->beginCutMs;
-                }
-            }
-            else {
-                seekTimeStamp = mAudioMixSettings->beginCutMs;
-            }
-
-            // Convert the seekTimeStamp to file location
-            mBGAudioPCMFileOriginalSeekPoint = (
-                                        (int64_t)(mAudioMixSettings->beginCutMs)
-                                        * mAudioMixSettings->uiSamplingFrequency
-                                        * mAudioMixSettings->uiNbChannels
-                                        * sizeof(M4OSA_UInt16))/ 1000 ; /*make to sec from ms*/
-
-            mBGAudioPCMFileSeekPoint = ((int64_t)(seekTimeStamp)
-                                        * mAudioMixSettings->uiSamplingFrequency
-                                        * mAudioMixSettings->uiNbChannels
-                                        * sizeof(M4OSA_UInt16))/ 1000 ;
-        }
-    }
-
-    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
-    // of playback, if there is one, getFormat below will retrieve the
-    // updated format, if there isn't, we'll stash away the valid buffer
-    // of data to be used on the first audio callback.
-
-    CHECK(mFirstBuffer == NULL);
-
-    mFirstBufferResult = mSource->read(&mFirstBuffer);
-    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
-        ALOGV("INFO_FORMAT_CHANGED!!!");
-
-        CHECK(mFirstBuffer == NULL);
-        mFirstBufferResult = OK;
-        mIsFirstBuffer = false;
-    } else {
-        mIsFirstBuffer = true;
-    }
-
-    sp<MetaData> format = mSource->getFormat();
-    const char *mime;
-    bool success = format->findCString(kKeyMIMEType, &mime);
-    CHECK(success);
-    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
-
-    success = format->findInt32(kKeySampleRate, &mSampleRate);
-    CHECK(success);
-
-    int32_t numChannels;
-    success = format->findInt32(kKeyChannelCount, &numChannels);
-    CHECK(success);
-
-    if (mAudioSink.get() != NULL) {
-        status_t err = mAudioSink->open(
-                mSampleRate, numChannels, CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT,
-                DEFAULT_AUDIOSINK_BUFFERCOUNT,
-                &VideoEditorAudioPlayer::AudioSinkCallback, this);
-        if (err != OK) {
-            if (mFirstBuffer != NULL) {
-                mFirstBuffer->release();
-                mFirstBuffer = NULL;
-            }
-
-            if (!sourceAlreadyStarted) {
-                mSource->stop();
-            }
-
-            return err;
-        }
-
-        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
-        mFrameSize = mAudioSink->frameSize();
-
-        mAudioSink->start();
-    } else {
-        mAudioTrack = new AudioTrack(
-                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT,
-                audio_channel_out_mask_from_count(numChannels),
-                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
-
-        if ((err = mAudioTrack->initCheck()) != OK) {
-            mAudioTrack.clear();
-
-            if (mFirstBuffer != NULL) {
-                mFirstBuffer->release();
-                mFirstBuffer = NULL;
-            }
-
-            if (!sourceAlreadyStarted) {
-                mSource->stop();
-            }
-
-            return err;
-        }
-
-        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
-        mFrameSize = mAudioTrack->frameSize();
-
-        mAudioTrack->start();
-    }
-
-    mStarted = true;
-
-    return OK;
-}
-
-
-void VideoEditorAudioPlayer::reset() {
-
-    ALOGV("reset");
-    clear();
-
-    // Capture the current seek point
-    mBGAudioPCMFileSeekPoint = 0;
-    mBGAudioStoryBoardSkimTimeStamp =0;
-    mBGAudioStoryBoardCurrentMediaBeginCutTS=0;
-}
-
-size_t VideoEditorAudioPlayer::AudioSinkCallback(
-        MediaPlayerBase::AudioSink *audioSink,
-        void *buffer, size_t size, void *cookie,
-        MediaPlayerBase::AudioSink::cb_event_t event) {
-    VideoEditorAudioPlayer *me = (VideoEditorAudioPlayer *)cookie;
-
-    if (event == MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER ) {
-        return me->fillBuffer(buffer, size);
-    } else {
-        return 0;
-    }
-}
-
-
-size_t VideoEditorAudioPlayer::fillBuffer(void *data, size_t size) {
-
-    if (mReachedEOS) {
-        return 0;
-    }
-
-    size_t size_done = 0;
-    size_t size_remaining = size;
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4AM_Buffer16 bgFrame = {NULL, 0};
-    M4AM_Buffer16 mixFrame = {NULL, 0};
-    M4AM_Buffer16 ptFrame = {NULL, 0};
-    int64_t currentSteamTS = 0;
-    int64_t startTimeForBT = 0;
-    M4OSA_Float fPTVolLevel =
-     ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal)/100;
-    M4OSA_Int16     *pPTMdata=NULL;
-    M4OSA_UInt32     uiPCMsize = 0;
-
-    bool postSeekComplete = false;
-    bool postEOS = false;
-
-    while ((size_remaining > 0)&&(err==M4NO_ERROR)) {
-        MediaSource::ReadOptions options;
-
-        {
-            Mutex::Autolock autoLock(mLock);
-            if (mSeeking) {
-                if (mIsFirstBuffer) {
-                    if (mFirstBuffer != NULL) {
-                        mFirstBuffer->release();
-                        mFirstBuffer = NULL;
-                    }
-                    mIsFirstBuffer = false;
-                }
-
-                options.setSeekTo(mSeekTimeUs);
-
-                if (mInputBuffer != NULL) {
-                    mInputBuffer->release();
-                    mInputBuffer = NULL;
-                }
-
-                mSeeking = false;
-
-                if (mObserver) {
-                    postSeekComplete = true;
-                }
-            }
-        }
-
-        if (mInputBuffer == NULL) {
-            status_t status = OK;
-
-            if (mIsFirstBuffer) {
-                mInputBuffer = mFirstBuffer;
-                mFirstBuffer = NULL;
-                status = mFirstBufferResult;
-
-                mIsFirstBuffer = false;
-            } else {
-
-                {
-                    Mutex::Autolock autoLock(mLock);
-                    status = mSource->read(&mInputBuffer, &options);
-                }
-                // Data is Primary Track, mix with background track
-                // after reading same size from Background track PCM file
-                if (status == OK)
-                {
-                    // Mix only when skim point is after startTime of BT
-                    if (((mBGAudioStoryBoardSkimTimeStamp* 1000) +
-                          (mPositionTimeMediaUs - mSeekTimeUs)) >=
-                          (int64_t)(mAudioMixSettings->uiAddCts * 1000)) {
-
-                        ALOGV("VideoEditorAudioPlayer::INSIDE MIXING");
-                        ALOGV("Checking %lld <= %lld",
-                            mBGAudioPCMFileSeekPoint-mBGAudioPCMFileOriginalSeekPoint,
-                            mBGAudioPCMFileTrimmedLength);
-
-
-                        M4OSA_Void* ptr;
-                        ptr = reinterpret_cast<M4OSA_Void*>(
-                                reinterpret_cast<uintptr_t>(mInputBuffer->data()) +
-                                mInputBuffer->range_offset());
-
-                        M4OSA_UInt32 len = mInputBuffer->range_length();
-                        M4OSA_Context fp = M4OSA_NULL;
-
-                        uiPCMsize = (mInputBuffer->range_length())/2;
-                        pPTMdata = (M4OSA_Int16*) ((uint8_t*) mInputBuffer->data()
-                                + mInputBuffer->range_offset());
-
-                        ALOGV("mix with background malloc to do len %d", len);
-
-                        bgFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc( len, 1,
-                                                       (M4OSA_Char*)"bgFrame");
-                        bgFrame.m_bufferSize = len;
-
-                        mixFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc(len, 1,
-                                                    (M4OSA_Char*)"mixFrame");
-                        mixFrame.m_bufferSize = len;
-
-                        ALOGV("mix with bgm with size %lld", mBGAudioPCMFileLength);
-
-                        CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime,
-                                         &mPositionTimeMediaUs));
-
-                        if (mBGAudioPCMFileSeekPoint -
-                             mBGAudioPCMFileOriginalSeekPoint <=
-                              (mBGAudioPCMFileTrimmedLength - len)) {
-
-                            ALOGV("Checking mBGAudioPCMFileHandle %p",
-                                  mBGAudioPCMFileHandle);
-
-                            if (mBGAudioPCMFileHandle != M4OSA_NULL) {
-                                ALOGV("fillBuffer seeking file to %lld",
-                                    mBGAudioPCMFileSeekPoint);
-
-                            // TODO : 32bits required for OSAL
-                                M4OSA_UInt32 tmp32 =
-                                    (M4OSA_UInt32)mBGAudioPCMFileSeekPoint;
-                                err = M4OSA_fileReadSeek(mBGAudioPCMFileHandle,
-                                                M4OSA_kFileSeekBeginning,
-                                                (M4OSA_FilePosition*)&tmp32);
-
-                                mBGAudioPCMFileSeekPoint = tmp32;
-
-                                if (err != M4NO_ERROR){
-                                    ALOGE("M4OSA_fileReadSeek err %d",(int)err);
-                                }
-
-                                err = M4OSA_fileReadData(mBGAudioPCMFileHandle,
-                                       (M4OSA_Int8*)bgFrame.m_dataAddress,
-                                       (M4OSA_UInt32*)&len);
-                                if (err == M4WAR_NO_DATA_YET ) {
-
-                                    ALOGV("fillBuffer End of file reached");
-                                    err = M4NO_ERROR;
-
-                                    // We reached the end of file
-                                    // move to begin cut time equal value
-                                    if (mAudioMixSettings->bLoop) {
-                                        mBGAudioPCMFileSeekPoint =
-                                         (((int64_t)(mAudioMixSettings->beginCutMs) *
-                                          mAudioMixSettings->uiSamplingFrequency) *
-                                          mAudioMixSettings->uiNbChannels *
-                                           sizeof(M4OSA_UInt16)) / 1000;
-                                        ALOGV("fillBuffer Looping \
-                                            to mBGAudioPCMFileSeekPoint %lld",
-                                            mBGAudioPCMFileSeekPoint);
-                                    }
-                                    else {
-                                            // No mixing;
-                                            // take care of volume of primary track
-                                        if (fPTVolLevel < 1.0) {
-                                            setPrimaryTrackVolume(pPTMdata,
-                                             uiPCMsize, fPTVolLevel);
-                                        }
-                                    }
-                                } else if (err != M4NO_ERROR ) {
-                                     ALOGV("fileReadData for audio err %d", err);
-                                } else {
-                                    mBGAudioPCMFileSeekPoint += len;
-                                    ALOGV("fillBuffer mBGAudioPCMFileSeekPoint \
-                                         %lld", mBGAudioPCMFileSeekPoint);
-
-                                    // Assign the ptr data to primary track
-                                    ptFrame.m_dataAddress = (M4OSA_UInt16*)ptr;
-                                    ptFrame.m_bufferSize = len;
-
-                                    // Call to mix and duck
-                                    mAudioProcess->mixAndDuck(
-                                         &ptFrame, &bgFrame, &mixFrame);
-
-                                        // Overwrite the decoded buffer
-                                    memcpy((void *)ptr,
-                                         (void *)mixFrame.m_dataAddress, len);
-                                }
-                            }
-                        } else if (mAudioMixSettings->bLoop){
-                            // Move to begin cut time equal value
-                            mBGAudioPCMFileSeekPoint =
-                                mBGAudioPCMFileOriginalSeekPoint;
-                        } else {
-                            // No mixing;
-                            // take care of volume level of primary track
-                            if(fPTVolLevel < 1.0) {
-                                setPrimaryTrackVolume(
-                                      pPTMdata, uiPCMsize, fPTVolLevel);
-                            }
-                        }
-                        if (bgFrame.m_dataAddress) {
-                            free(bgFrame.m_dataAddress);
-                        }
-                        if (mixFrame.m_dataAddress) {
-                            free(mixFrame.m_dataAddress);
-                        }
-                    } else {
-                        // No mixing;
-                        // take care of volume level of primary track
-                        if(fPTVolLevel < 1.0) {
-                            setPrimaryTrackVolume(pPTMdata, uiPCMsize,
-                                                 fPTVolLevel);
-                        }
-                    }
-                }
-            }
-
-            CHECK((status == OK && mInputBuffer != NULL)
-                   || (status != OK && mInputBuffer == NULL));
-
-            Mutex::Autolock autoLock(mLock);
-
-            if (status != OK) {
-                ALOGV("fillBuffer: mSource->read returned err %d", status);
-                if (mObserver && !mReachedEOS) {
-                    postEOS = true;
-                }
-
-                mReachedEOS = true;
-                mFinalStatus = status;
-                break;
-            }
-
-            CHECK(mInputBuffer->meta_data()->findInt64(
-                        kKeyTime, &mPositionTimeMediaUs));
-
-            mPositionTimeRealUs =
-                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
-                    / mSampleRate;
-
-            ALOGV("buffer->size() = %d, "
-                     "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
-                 mInputBuffer->range_length(),
-                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
-        }
-
-        if (mInputBuffer->range_length() == 0) {
-            mInputBuffer->release();
-            mInputBuffer = NULL;
-
-            continue;
-        }
-
-        size_t copy = size_remaining;
-        if (copy > mInputBuffer->range_length()) {
-            copy = mInputBuffer->range_length();
-        }
-
-        memcpy((char *)data + size_done,
-           (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
-               copy);
-
-        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
-                            mInputBuffer->range_length() - copy);
-
-        size_done += copy;
-        size_remaining -= copy;
-    }
-
-    {
-        Mutex::Autolock autoLock(mLock);
-        mNumFramesPlayed += size_done / mFrameSize;
-    }
-
-    if (postEOS) {
-        mObserver->postAudioEOS();
-    }
-
-    if (postSeekComplete) {
-        mObserver->postAudioSeekComplete();
-    }
-
-    return size_done;
-}
-
-void VideoEditorAudioPlayer::setAudioMixSettings(
-                            M4xVSS_AudioMixingSettings* pAudioMixSettings) {
-    mAudioMixSettings = pAudioMixSettings;
-}
-
-void VideoEditorAudioPlayer::setAudioMixPCMFileHandle(
-                            M4OSA_Context pBGAudioPCMFileHandle){
-    mBGAudioPCMFileHandle = pBGAudioPCMFileHandle;
-}
-
-void VideoEditorAudioPlayer::setAudioMixStoryBoardSkimTimeStamp(
-                            M4OSA_UInt32 pBGAudioStoryBoardSkimTimeStamp,
-                            M4OSA_UInt32 pBGAudioCurrentMediaBeginCutTS,
-                            M4OSA_UInt32 pBGAudioCurrentMediaVolumeVal) {
-
-    mBGAudioStoryBoardSkimTimeStamp = pBGAudioStoryBoardSkimTimeStamp;
-    mBGAudioStoryBoardCurrentMediaBeginCutTS = pBGAudioCurrentMediaBeginCutTS;
-    mBGAudioStoryBoardCurrentMediaVolumeVal = pBGAudioCurrentMediaVolumeVal;
-}
-
-void VideoEditorAudioPlayer::setPrimaryTrackVolume(
-    M4OSA_Int16 *data, M4OSA_UInt32 size, M4OSA_Float volLevel) {
-
-    while(size-- > 0) {
-        *data = (M4OSA_Int16)((*data)*volLevel);
-        data++;
-    }
-}
-
-}
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
deleted file mode 100755
index 2caf5e8..0000000
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VE_AUDIO_PLAYER_H_
-#define VE_AUDIO_PLAYER_H_
-
-#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/TimeSource.h>
-#include <utils/threads.h>
-
-#include "M4xVSS_API.h"
-#include "VideoEditorMain.h"
-#include "M4OSA_FileReader.h"
-#include "VideoEditorBGAudioProcessing.h"
-
-
-namespace android {
-
-class MediaSource;
-class AudioTrack;
-class PreviewPlayer;
-
-class VideoEditorAudioPlayer : public TimeSource {
-public:
-    enum {
-        REACHED_EOS,
-        SEEK_COMPLETE
-    };
-
-    VideoEditorAudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
-        PreviewPlayer *audioObserver = NULL);
-
-    ~VideoEditorAudioPlayer();
-
-    // Return time in us.
-    int64_t getRealTimeUs();
-
-    // Returns the timestamp of the last buffer played (in us).
-    int64_t getMediaTimeUs();
-
-    // Returns true iff a mapping is established, i.e. the AudioPlayerBase
-    // has played at least one frame of audio.
-    bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
-
-    status_t start(bool sourceAlreadyStarted = false);
-    void pause(bool playPendingSamples = false);
-    status_t resume();
-    status_t seekTo(int64_t time_us);
-    bool isSeeking();
-    bool reachedEOS(status_t *finalStatus);
-
-    void setAudioMixSettings(M4xVSS_AudioMixingSettings* pAudioMixSettings);
-    void setAudioMixPCMFileHandle(M4OSA_Context pBGAudioPCMFileHandle);
-    void setAudioMixStoryBoardSkimTimeStamp(
-        M4OSA_UInt32 pBGAudioStoryBoardSkimTimeStamp,
-        M4OSA_UInt32 pBGAudioCurrentMediaBeginCutTS,
-        M4OSA_UInt32 pBGAudioCurrentMediaVolumeVal);
-
-    void setObserver(PreviewPlayer *observer);
-    void setSource(const sp<MediaSource> &source);
-    sp<MediaSource> getSource();
-
-    bool isStarted();
-private:
-
-    M4xVSS_AudioMixingSettings *mAudioMixSettings;
-    VideoEditorBGAudioProcessing *mAudioProcess;
-
-    M4OSA_Context mBGAudioPCMFileHandle;
-    int64_t mBGAudioPCMFileLength;
-    int64_t mBGAudioPCMFileTrimmedLength;
-    int64_t mBGAudioPCMFileDuration;
-    int64_t mBGAudioPCMFileSeekPoint;
-    int64_t mBGAudioPCMFileOriginalSeekPoint;
-    int64_t mBGAudioStoryBoardSkimTimeStamp;
-    int64_t mBGAudioStoryBoardCurrentMediaBeginCutTS;
-    int64_t mBGAudioStoryBoardCurrentMediaVolumeVal;
-
-    sp<MediaSource> mSource;
-    sp<AudioTrack> mAudioTrack;
-
-    MediaBuffer *mInputBuffer;
-
-    int mSampleRate;
-    int64_t mLatencyUs;
-    size_t mFrameSize;
-
-    Mutex mLock;
-    int64_t mNumFramesPlayed;
-
-    int64_t mPositionTimeMediaUs;
-    int64_t mPositionTimeRealUs;
-
-    bool mSeeking;
-    bool mReachedEOS;
-    status_t mFinalStatus;
-    int64_t mSeekTimeUs;
-
-    bool mStarted;
-
-    bool mIsFirstBuffer;
-    status_t mFirstBufferResult;
-    MediaBuffer *mFirstBuffer;
-
-    sp<MediaPlayerBase::AudioSink> mAudioSink;
-    PreviewPlayer *mObserver;
-
-    static void AudioCallback(int event, void *user, void *info);
-    void AudioCallback(int event, void *info);
-    size_t fillBuffer(void *data, size_t size);
-    static size_t AudioSinkCallback(
-            MediaPlayerBase::AudioSink *audioSink,
-            void *data, size_t size, void *me,
-            MediaPlayerBase::AudioSink::cb_event_t event);
-
-    void reset();
-    void clear();
-    int64_t getRealTimeUs_l();
-    void setPrimaryTrackVolume(
-            M4OSA_Int16 *data, M4OSA_UInt32 size, M4OSA_Float volLevel);
-
-    VideoEditorAudioPlayer(const VideoEditorAudioPlayer &);
-    VideoEditorAudioPlayer &operator=(const VideoEditorAudioPlayer &);
-};
-
-}  // namespace android
-
-#endif  // VE_AUDIO_PLAYER_H_
diff --git a/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp b/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp
deleted file mode 100755
index 0c12aac..0000000
--- a/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <inttypes.h>
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "VideoEditorBGAudioProcessing"
-#include <utils/Log.h>
-#include "VideoEditorBGAudioProcessing.h"
-
-namespace android {
-
-VideoEditorBGAudioProcessing::VideoEditorBGAudioProcessing() {
-    ALOGV("Constructor");
-
-    mAudVolArrIndex = 0;
-    mDoDucking = 0;
-    mDucking_enable = 0;
-    mDucking_lowVolume = 0;
-    mDucking_threshold = 0;
-    mDuckingFactor = 0;
-
-    mBTVolLevel = 0;
-    mPTVolLevel = 0;
-
-    mIsSSRCneeded = 0;
-    mChannelConversion = 0;
-
-    mBTFormat = MONO_16_BIT;
-
-    mInSampleRate = 8000;
-    mOutSampleRate = 16000;
-    mPTChannelCount = 2;
-    mBTChannelCount = 1;
-}
-
-M4OSA_Int32 VideoEditorBGAudioProcessing::mixAndDuck(
-        void *primaryTrackBuffer,
-        void *backgroundTrackBuffer,
-        void *outBuffer) {
-
-    ALOGV("mixAndDuck: track buffers (primary: %p and background: %p) "
-            "and out buffer %p",
-            primaryTrackBuffer, backgroundTrackBuffer, outBuffer);
-
-    M4AM_Buffer16* pPrimaryTrack   = (M4AM_Buffer16*)primaryTrackBuffer;
-    M4AM_Buffer16* pBackgroundTrack = (M4AM_Buffer16*)backgroundTrackBuffer;
-    M4AM_Buffer16* pMixedOutBuffer  = (M4AM_Buffer16*)outBuffer;
-
-    // Output size if same as PT size
-    pMixedOutBuffer->m_bufferSize = pPrimaryTrack->m_bufferSize;
-
-    // Before mixing, we need to have only PT as out buffer
-    memcpy((void *)pMixedOutBuffer->m_dataAddress,
-        (void *)pPrimaryTrack->m_dataAddress, pMixedOutBuffer->m_bufferSize);
-
-    // Initialize ducking variables
-    // Initially contains the input primary track
-    M4OSA_Int16 *pPTMdata2 = (M4OSA_Int16*)pMixedOutBuffer->m_dataAddress;
-
-    // Contains BG track processed data(like channel conversion etc..
-    M4OSA_Int16 *pBTMdata1 = (M4OSA_Int16*) pBackgroundTrack->m_dataAddress;
-
-    // Since we need to give sample count and not buffer size
-    M4OSA_UInt32 uiPCMsize = pMixedOutBuffer->m_bufferSize / 2 ;
-
-    if ((mDucking_enable) && (mPTVolLevel != 0.0)) {
-        M4OSA_Int32 peakDbValue = 0;
-        M4OSA_Int32 previousDbValue = 0;
-        M4OSA_Int16 *pPCM16Sample = (M4OSA_Int16*)pPrimaryTrack->m_dataAddress;
-        const size_t n = pPrimaryTrack->m_bufferSize / sizeof(M4OSA_Int16);
-
-        for (size_t loopIndex = 0; loopIndex < n; ++loopIndex) {
-            if (pPCM16Sample[loopIndex] >= 0) {
-                peakDbValue = previousDbValue > pPCM16Sample[loopIndex] ?
-                        previousDbValue : pPCM16Sample[loopIndex];
-                previousDbValue = peakDbValue;
-            } else {
-                peakDbValue = previousDbValue > -pPCM16Sample[loopIndex] ?
-                        previousDbValue: -pPCM16Sample[loopIndex];
-                previousDbValue = peakDbValue;
-            }
-        }
-
-        mAudioVolumeArray[mAudVolArrIndex] = getDecibelSound(peakDbValue);
-
-        // Check for threshold is done after kProcessingWindowSize cycles
-        if (mAudVolArrIndex >= kProcessingWindowSize - 1) {
-            mDoDucking = isThresholdBreached(
-                    mAudioVolumeArray, mAudVolArrIndex, mDucking_threshold);
-
-            mAudVolArrIndex = 0;
-        } else {
-            mAudVolArrIndex++;
-        }
-
-        //
-        // Below logic controls the mixing weightage
-        // for Background and Primary Tracks
-        // for the duration of window under analysis,
-        // to give fade-out for Background and fade-in for primary
-        // Current fading factor is distributed in equal range over
-        // the defined window size.
-        // For a window size = 25
-        // (500 ms (window under analysis) / 20 ms (sample duration))
-        //
-
-        if (mDoDucking) {
-            if (mDuckingFactor > mDucking_lowVolume) {
-                // FADE OUT BG Track
-                // Increment ducking factor in total steps in factor
-                // of low volume steps to reach low volume level
-                mDuckingFactor -= mDucking_lowVolume;
-            } else {
-                mDuckingFactor = mDucking_lowVolume;
-            }
-        } else {
-            if (mDuckingFactor < 1.0 ) {
-                // FADE IN BG Track
-                // Increment ducking factor in total steps of
-                // low volume factor to reach orig.volume level
-                mDuckingFactor += mDucking_lowVolume;
-            } else {
-                mDuckingFactor = 1.0;
-            }
-        }
-    } // end if - mDucking_enable
-
-
-    // Mixing logic
-    ALOGV("Out of Ducking analysis uiPCMsize %d %f %f",
-            mDoDucking, mDuckingFactor, mBTVolLevel);
-    while (uiPCMsize-- > 0) {
-
-        // Set vol factor for BT and PT
-        *pBTMdata1 = (M4OSA_Int16)(*pBTMdata1*mBTVolLevel);
-        *pPTMdata2 = (M4OSA_Int16)(*pPTMdata2*mPTVolLevel);
-
-        // Mix the two samples
-        if (mDoDucking) {
-
-            // Duck the BG track to ducking factor value before mixing
-            *pBTMdata1 = (M4OSA_Int16)((*pBTMdata1)*(mDuckingFactor));
-
-            // mix as normal case
-            *pBTMdata1 = (M4OSA_Int16)(*pBTMdata1 /2 + *pPTMdata2 /2);
-        } else {
-
-            *pBTMdata1 = (M4OSA_Int16)((*pBTMdata1)*(mDuckingFactor));
-            *pBTMdata1 = (M4OSA_Int16)(*pBTMdata1 /2 + *pPTMdata2 /2);
-        }
-
-        M4OSA_Int32 temp;
-        if (*pBTMdata1 < 0) {
-            temp = -(*pBTMdata1) * 2; // bring to original Amplitude level
-
-            if (temp > 32767) {
-                *pBTMdata1 = -32766; // less then max allowed value
-            } else {
-                *pBTMdata1 = (M4OSA_Int16)(-temp);
-            }
-        } else {
-            temp = (*pBTMdata1) * 2; // bring to original Amplitude level
-            if ( temp > 32768) {
-                *pBTMdata1 = 32767; // less than max allowed value
-            } else {
-                *pBTMdata1 = (M4OSA_Int16)temp;
-            }
-        }
-
-        pBTMdata1++;
-        pPTMdata2++;
-    }
-
-    memcpy((void *)pMixedOutBuffer->m_dataAddress,
-        (void *)pBackgroundTrack->m_dataAddress,
-        pBackgroundTrack->m_bufferSize);
-
-    ALOGV("mixAndDuck: X");
-    return M4NO_ERROR;
-}
-
-M4OSA_Int32 VideoEditorBGAudioProcessing::calculateOutResampleBufSize() {
-
-    // This already takes care of channel count in mBTBuffer.m_bufferSize
-    return (mOutSampleRate / mInSampleRate) * mBTBuffer.m_bufferSize;
-}
-
-void VideoEditorBGAudioProcessing::setMixParams(
-        const AudioMixSettings& setting) {
-    ALOGV("setMixParams");
-
-    mDucking_enable       = setting.lvInDucking_enable;
-    mDucking_lowVolume    = setting.lvInDucking_lowVolume;
-    mDucking_threshold    = setting.lvInDucking_threshold;
-    mPTVolLevel           = setting.lvPTVolLevel;
-    mBTVolLevel           = setting.lvBTVolLevel ;
-    mBTChannelCount       = setting.lvBTChannelCount;
-    mPTChannelCount       = setting.lvPTChannelCount;
-    mBTFormat             = setting.lvBTFormat;
-    mInSampleRate         = setting.lvInSampleRate;
-    mOutSampleRate        = setting.lvOutSampleRate;
-
-    // Reset the following params to default values
-    mAudVolArrIndex       = 0;
-    mDoDucking            = 0;
-    mDuckingFactor        = 1.0;
-
-    ALOGV("ducking enable 0x%x lowVolume %f threshold %" PRIu32 " "
-            "fPTVolLevel %f BTVolLevel %f",
-            mDucking_enable, mDucking_lowVolume, mDucking_threshold,
-            mPTVolLevel, mPTVolLevel);
-
-    // Decides if SSRC support is needed for this mixing
-    mIsSSRCneeded = (setting.lvInSampleRate != setting.lvOutSampleRate);
-    if (setting.lvBTChannelCount != setting.lvPTChannelCount){
-        if (setting.lvBTChannelCount == 2){
-            mChannelConversion = 1; // convert to MONO
-        } else {
-            mChannelConversion = 2; // Convert to STEREO
-        }
-    } else {
-        mChannelConversion = 0;
-    }
-}
-
-// Fast way to compute 10 * log(value)
-M4OSA_Int32 VideoEditorBGAudioProcessing::getDecibelSound(M4OSA_UInt32 value) {
-    ALOGV("getDecibelSound: %ld", value);
-
-    if (value <= 0 || value > 0x8000) {
-        return 0;
-    } else if (value > 0x4000) { // 32768
-        return 90;
-    } else if (value > 0x2000) { // 16384
-        return 84;
-    } else if (value > 0x1000) { // 8192
-        return 78;
-    } else if (value > 0x0800) { // 4028
-        return 72;
-    } else if (value > 0x0400) { // 2048
-        return 66;
-    } else if (value > 0x0200) { // 1024
-        return 60;
-    } else if (value > 0x0100) { // 512
-        return 54;
-    } else if (value > 0x0080) { // 256
-        return 48;
-    } else if (value > 0x0040) { // 128
-        return 42;
-    } else if (value > 0x0020) { // 64
-        return 36;
-    } else if (value > 0x0010) { // 32
-        return 30;
-    } else if (value > 0x0008) { // 16
-        return 24;
-    } else if (value > 0x0007) { // 8
-        return 24;
-    } else if (value > 0x0003) { // 4
-        return 18;
-    } else if (value > 0x0001) { // 2
-        return 12;
-    } else  { // 1
-        return 6;
-    }
-}
-
-M4OSA_Bool VideoEditorBGAudioProcessing::isThresholdBreached(
-        M4OSA_Int32* averageValue,
-        M4OSA_Int32 storeCount,
-        M4OSA_Int32 thresholdValue) {
-
-    ALOGV("isThresholdBreached");
-
-    int totalValue = 0;
-    for (int i = 0; i < storeCount; ++i) {
-        totalValue += averageValue[i];
-    }
-    return (totalValue / storeCount > thresholdValue);
-}
-
-}//namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.h b/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.h
deleted file mode 100755
index cb7a69f..0000000
--- a/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VE_BACKGROUND_AUDIO_PROC_H
-#define VE_BACKGROUND_AUDIO_PROC_H
-
-#include "M4OSA_Error.h"
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Export.h"
-#include "M4OSA_CoreID.h"
-
-
-namespace android {
-
-typedef struct {
-    M4OSA_UInt16*   m_dataAddress; // Android SRC needs a Int16 pointer
-    M4OSA_UInt32    m_bufferSize;
-} M4AM_Buffer16;    // Structure contains Int16_t pointer
-
-enum AudioFormat {
-    MONO_16_BIT,
-    STEREO_16_BIT
-};
-
-// Following struct will be used by app to supply the PT and BT properties
-// along with ducking values
-typedef struct {
-    M4OSA_Int32 lvInSampleRate; // Sampling audio freq (8000,16000 or more )
-    M4OSA_Int32 lvOutSampleRate; //Sampling audio freq (8000,16000 or more )
-    AudioFormat lvBTFormat;
-
-    M4OSA_Int32 lvInDucking_threshold;
-    M4OSA_Float lvInDucking_lowVolume;
-    M4OSA_Bool lvInDucking_enable;
-    M4OSA_Float lvPTVolLevel;
-    M4OSA_Float lvBTVolLevel;
-    M4OSA_Int32 lvBTChannelCount;
-    M4OSA_Int32 lvPTChannelCount;
-} AudioMixSettings;
-
-// This class is defined to get SF SRC access
-class VideoEditorBGAudioProcessing {
-public:
-    VideoEditorBGAudioProcessing();
-    ~VideoEditorBGAudioProcessing() {}
-
-    void setMixParams(const AudioMixSettings& params);
-
-    M4OSA_Int32 mixAndDuck(
-                    void* primaryTrackBuffer,
-                    void* backgroundTrackBuffer,
-                    void* mixedOutputBuffer);
-
-private:
-    enum {
-        kProcessingWindowSize = 10,
-    };
-
-    M4OSA_Int32 mInSampleRate;
-    M4OSA_Int32 mOutSampleRate;
-    AudioFormat mBTFormat;
-
-    M4OSA_Bool mIsSSRCneeded;
-    M4OSA_Int32 mBTChannelCount;
-    M4OSA_Int32 mPTChannelCount;
-    M4OSA_UInt8 mChannelConversion;
-
-    M4OSA_UInt32 mDucking_threshold;
-    M4OSA_Float mDucking_lowVolume;
-    M4OSA_Float mDuckingFactor ;
-    M4OSA_Bool mDucking_enable;
-    M4OSA_Int32 mAudioVolumeArray[kProcessingWindowSize];
-    M4OSA_Int32 mAudVolArrIndex;
-    M4OSA_Bool mDoDucking;
-    M4OSA_Float mPTVolLevel;
-    M4OSA_Float mBTVolLevel;
-
-    M4AM_Buffer16 mBTBuffer;
-
-    M4OSA_Int32 getDecibelSound(M4OSA_UInt32 value);
-    M4OSA_Bool  isThresholdBreached(M4OSA_Int32* averageValue,
-                    M4OSA_Int32 storeCount, M4OSA_Int32 thresholdValue);
-
-    // This returns the size of buffer which needs to allocated
-    // before resampling is called
-    M4OSA_Int32 calculateOutResampleBufSize();
-
-    // Don't call me.
-    VideoEditorBGAudioProcessing(const VideoEditorBGAudioProcessing&);
-    VideoEditorBGAudioProcessing& operator=(
-            const VideoEditorBGAudioProcessing&);
-};
-
-}  // namespace android
-
-#endif // VE_BACKGROUND_AUDIO_PROC_H
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
deleted file mode 100755
index 8d656c4..0000000
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ /dev/null
@@ -1,595 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_NDEBUG 1
-#define LOG_TAG "VideoEditorPlayer"
-#include <utils/Log.h>
-
-#include "VideoEditorPlayer.h"
-#include "PreviewPlayer.h"
-
-#include <media/Metadata.h>
-#include <media/stagefright/MediaExtractor.h>
-
-#include <system/audio.h>
-
-namespace android {
-
-VideoEditorPlayer::VideoEditorPlayer(NativeWindowRenderer* renderer)
-    : mPlayer(new PreviewPlayer(renderer)) {
-
-    ALOGV("VideoEditorPlayer");
-    mPlayer->setListener(this);
-}
-
-VideoEditorPlayer::~VideoEditorPlayer() {
-    ALOGV("~VideoEditorPlayer");
-
-    reset();
-    mVeAudioSink.clear();
-
-    delete mPlayer;
-    mPlayer = NULL;
-}
-
-status_t VideoEditorPlayer::initCheck() {
-    ALOGV("initCheck");
-    return OK;
-}
-
-
-status_t VideoEditorPlayer::setAudioPlayer(VideoEditorAudioPlayer *audioPlayer) {
-    return mPlayer->setAudioPlayer(audioPlayer);
-}
-
-
-status_t VideoEditorPlayer::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers) {
-    ALOGI("setDataSource('%s')", url);
-    if (headers != NULL) {
-        ALOGE("Headers parameter is not supported");
-        return INVALID_OPERATION;
-    }
-
-    return mPlayer->setDataSource(url);
-}
-
-//We donot use this in preview, dummy implimentation as this is pure virtual
-status_t VideoEditorPlayer::setDataSource(int fd, int64_t offset,
-    int64_t length) {
-    ALOGE("setDataSource(%d, %lld, %lld) Not supported", fd, offset, length);
-    return (!OK);
-}
-
-status_t VideoEditorPlayer::setVideoSurface(const sp<Surface> &surface) {
-    ALOGV("setVideoSurface");
-
-    mPlayer->setSurface(surface);
-    return OK;
-}
-
-status_t VideoEditorPlayer::setVideoSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer) {
-    ALOGV("setVideoSurfaceTexture");
-
-    mPlayer->setSurfaceTexture(bufferProducer);
-    return OK;
-}
-
-status_t VideoEditorPlayer::prepare() {
-    ALOGV("prepare");
-    return mPlayer->prepare();
-}
-
-status_t VideoEditorPlayer::prepareAsync() {
-    return mPlayer->prepareAsync();
-}
-
-status_t VideoEditorPlayer::start() {
-    ALOGV("start");
-    return mPlayer->play();
-}
-
-status_t VideoEditorPlayer::stop() {
-    ALOGV("stop");
-    return pause();
-}
-
-status_t VideoEditorPlayer::pause() {
-    ALOGV("pause");
-    return mPlayer->pause();
-}
-
-bool VideoEditorPlayer::isPlaying() {
-    ALOGV("isPlaying");
-    return mPlayer->isPlaying();
-}
-
-status_t VideoEditorPlayer::seekTo(int msec) {
-    ALOGV("seekTo");
-    status_t err = mPlayer->seekTo((int64_t)msec * 1000);
-    return err;
-}
-
-status_t VideoEditorPlayer::getCurrentPosition(int *msec) {
-    ALOGV("getCurrentPosition");
-    int64_t positionUs;
-    status_t err = mPlayer->getPosition(&positionUs);
-
-    if (err != OK) {
-        return err;
-    }
-
-    *msec = (positionUs + 500) / 1000;
-    return OK;
-}
-
-status_t VideoEditorPlayer::getDuration(int *msec) {
-    ALOGV("getDuration");
-
-    int64_t durationUs;
-    status_t err = mPlayer->getDuration(&durationUs);
-
-    if (err != OK) {
-        *msec = 0;
-        return OK;
-    }
-
-    *msec = (durationUs + 500) / 1000;
-    return OK;
-}
-
-status_t VideoEditorPlayer::reset() {
-    ALOGV("reset");
-    mPlayer->reset();
-    return OK;
-}
-
-status_t VideoEditorPlayer::setLooping(int loop) {
-    ALOGV("setLooping");
-    return mPlayer->setLooping(loop);
-}
-
-status_t VideoEditorPlayer::setParameter(int key, const Parcel &request) {
-    ALOGE("setParameter not implemented");
-    return INVALID_OPERATION;
-}
-
-status_t VideoEditorPlayer::getParameter(int key, Parcel *reply) {
-    ALOGE("getParameter not implemented");
-    return INVALID_OPERATION;
-}
-
-player_type VideoEditorPlayer::playerType() {
-    ALOGV("playerType");
-    return STAGEFRIGHT_PLAYER;
-}
-
-void VideoEditorPlayer::acquireLock() {
-    ALOGV("acquireLock");
-    mPlayer->acquireLock();
-}
-
-void VideoEditorPlayer::releaseLock() {
-    ALOGV("releaseLock");
-    mPlayer->releaseLock();
-}
-
-status_t VideoEditorPlayer::invoke(const Parcel &request, Parcel *reply) {
-    return INVALID_OPERATION;
-}
-
-void VideoEditorPlayer::setAudioSink(const sp<AudioSink> &audioSink) {
-    MediaPlayerInterface::setAudioSink(audioSink);
-
-    mPlayer->setAudioSink(audioSink);
-}
-
-status_t VideoEditorPlayer::getMetadata(
-        const media::Metadata::Filter& ids, Parcel *records) {
-    using media::Metadata;
-
-    uint32_t flags = mPlayer->getSourceSeekFlags();
-
-    Metadata metadata(records);
-
-    metadata.appendBool(
-            Metadata::kPauseAvailable,
-            flags & MediaExtractor::CAN_PAUSE);
-
-    metadata.appendBool(
-            Metadata::kSeekBackwardAvailable,
-            flags & MediaExtractor::CAN_SEEK_BACKWARD);
-
-    metadata.appendBool(
-            Metadata::kSeekForwardAvailable,
-            flags & MediaExtractor::CAN_SEEK_FORWARD);
-
-    metadata.appendBool(
-            Metadata::kSeekAvailable,
-            flags & MediaExtractor::CAN_SEEK);
-
-    return OK;
-}
-
-status_t VideoEditorPlayer::loadEffectsSettings(
-    M4VSS3GPP_EffectSettings* pEffectSettings, int nEffects) {
-    ALOGV("loadEffectsSettings");
-    return mPlayer->loadEffectsSettings(pEffectSettings, nEffects);
-}
-
-status_t VideoEditorPlayer::loadAudioMixSettings(
-    M4xVSS_AudioMixingSettings* pAudioMixSettings) {
-    ALOGV("VideoEditorPlayer: loadAudioMixSettings");
-    return mPlayer->loadAudioMixSettings(pAudioMixSettings);
-}
-
-status_t VideoEditorPlayer::setAudioMixPCMFileHandle(
-    M4OSA_Context pAudioMixPCMFileHandle) {
-
-    ALOGV("VideoEditorPlayer: loadAudioMixSettings");
-    return mPlayer->setAudioMixPCMFileHandle(pAudioMixPCMFileHandle);
-}
-
-status_t VideoEditorPlayer::setAudioMixStoryBoardParam(
-    M4OSA_UInt32 audioMixStoryBoardTS,
-    M4OSA_UInt32 currentMediaBeginCutTime,
-    M4OSA_UInt32 primaryTrackVolValue) {
-
-    ALOGV("VideoEditorPlayer: loadAudioMixSettings");
-    return mPlayer->setAudioMixStoryBoardParam(audioMixStoryBoardTS,
-     currentMediaBeginCutTime, primaryTrackVolValue);
-}
-
-status_t VideoEditorPlayer::setPlaybackBeginTime(uint32_t msec) {
-    ALOGV("setPlaybackBeginTime");
-    return mPlayer->setPlaybackBeginTime(msec);
-}
-
-status_t VideoEditorPlayer::setPlaybackEndTime(uint32_t msec) {
-    ALOGV("setPlaybackEndTime");
-    return mPlayer->setPlaybackEndTime(msec);
-}
-
-status_t VideoEditorPlayer::setStoryboardStartTime(uint32_t msec) {
-    ALOGV("setStoryboardStartTime");
-    return mPlayer->setStoryboardStartTime(msec);
-}
-
-status_t VideoEditorPlayer::setProgressCallbackInterval(uint32_t cbInterval) {
-    ALOGV("setProgressCallbackInterval");
-    return mPlayer->setProgressCallbackInterval(cbInterval);
-}
-
-status_t VideoEditorPlayer::setMediaRenderingMode(
-    M4xVSS_MediaRendering mode,
-    M4VIDEOEDITING_VideoFrameSize outputVideoSize) {
-
-    ALOGV("setMediaRenderingMode");
-    return mPlayer->setMediaRenderingMode(mode, outputVideoSize);
-}
-
-status_t VideoEditorPlayer::resetJniCallbackTimeStamp() {
-    ALOGV("resetJniCallbackTimeStamp");
-    return mPlayer->resetJniCallbackTimeStamp();
-}
-
-status_t VideoEditorPlayer::setImageClipProperties(
-    uint32_t width, uint32_t height) {
-    return mPlayer->setImageClipProperties(width, height);
-}
-
-status_t VideoEditorPlayer::readFirstVideoFrame() {
-    return mPlayer->readFirstVideoFrame();
-}
-
-status_t VideoEditorPlayer::getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs) {
-    mPlayer->getLastRenderedTimeMs(lastRenderedTimeMs);
-    return NO_ERROR;
-}
-
-/* Implementation of AudioSink interface */
-#undef LOG_TAG
-#define LOG_TAG "VeAudioSink"
-
-int VideoEditorPlayer::VeAudioOutput::mMinBufferCount = 4;
-bool VideoEditorPlayer::VeAudioOutput::mIsOnEmulator = false;
-
-VideoEditorPlayer::VeAudioOutput::VeAudioOutput()
-    : mCallback(NULL),
-      mCallbackCookie(NULL) {
-    mStreamType = AUDIO_STREAM_MUSIC;
-    mLeftVolume = 1.0;
-    mRightVolume = 1.0;
-    mLatency = 0;
-    mMsecsPerFrame = 0;
-    mNumFramesWritten = 0;
-    setMinBufferCount();
-}
-
-VideoEditorPlayer::VeAudioOutput::~VeAudioOutput() {
-    close();
-}
-
-void VideoEditorPlayer::VeAudioOutput::setMinBufferCount() {
-
-    mIsOnEmulator = false;
-    mMinBufferCount = 4;
-}
-
-bool VideoEditorPlayer::VeAudioOutput::isOnEmulator() {
-
-    setMinBufferCount();
-    return mIsOnEmulator;
-}
-
-int VideoEditorPlayer::VeAudioOutput::getMinBufferCount() {
-
-    setMinBufferCount();
-    return mMinBufferCount;
-}
-
-ssize_t VideoEditorPlayer::VeAudioOutput::bufferSize() const {
-
-    if (mTrack == 0) return NO_INIT;
-    return mTrack->frameCount() * frameSize();
-}
-
-ssize_t VideoEditorPlayer::VeAudioOutput::frameCount() const {
-
-    if (mTrack == 0) return NO_INIT;
-    return mTrack->frameCount();
-}
-
-ssize_t VideoEditorPlayer::VeAudioOutput::channelCount() const
-{
-    if (mTrack == 0) return NO_INIT;
-    return mTrack->channelCount();
-}
-
-ssize_t VideoEditorPlayer::VeAudioOutput::frameSize() const
-{
-    if (mTrack == 0) return NO_INIT;
-    return mTrack->frameSize();
-}
-
-uint32_t VideoEditorPlayer::VeAudioOutput::latency () const
-{
-    return mLatency;
-}
-
-float VideoEditorPlayer::VeAudioOutput::msecsPerFrame() const
-{
-    return mMsecsPerFrame;
-}
-
-status_t VideoEditorPlayer::VeAudioOutput::getPosition(uint32_t *position) const {
-
-    if (mTrack == 0) return NO_INIT;
-    return mTrack->getPosition(position);
-}
-
-status_t VideoEditorPlayer::VeAudioOutput::getFramesWritten(uint32_t *written) const {
-
-    if (mTrack == 0) return NO_INIT;
-    *written = mNumFramesWritten;
-    return OK;
-}
-
-status_t VideoEditorPlayer::VeAudioOutput::open(
-        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
-        audio_format_t format, int bufferCount,
-        AudioCallback cb, void *cookie, audio_output_flags_t flags,
-        const audio_offload_info_t *offloadInfo) {
-
-    mCallback = cb;
-    mCallbackCookie = cookie;
-
-    // Check argument "bufferCount" against the mininum buffer count
-    if (bufferCount < mMinBufferCount) {
-        ALOGV("bufferCount (%d) is too small and increased to %d",
-            bufferCount, mMinBufferCount);
-        bufferCount = mMinBufferCount;
-
-    }
-    ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
-    if (mTrack != 0) close();
-    uint32_t afSampleRate;
-    size_t afFrameCount;
-    int frameCount;
-
-    if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) !=
-     NO_ERROR) {
-        return NO_INIT;
-    }
-    if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) !=
-     NO_ERROR) {
-        return NO_INIT;
-    }
-
-    frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
-
-    if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
-        switch(channelCount) {
-          case 1:
-            channelMask = AUDIO_CHANNEL_OUT_MONO;
-            break;
-          case 2:
-            channelMask = AUDIO_CHANNEL_OUT_STEREO;
-            break;
-          default:
-            return NO_INIT;
-        }
-    }
-
-    sp<AudioTrack> t;
-    if (mCallback != NULL) {
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                CallbackWrapper,
-                this);
-    } else {
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags);
-    }
-
-    if ((t == 0) || (t->initCheck() != NO_ERROR)) {
-        ALOGE("Unable to create audio track");
-        return NO_INIT;
-    }
-
-    ALOGV("setVolume");
-    t->setVolume(mLeftVolume, mRightVolume);
-    mMsecsPerFrame = 1.e3 / (float) sampleRate;
-    mLatency = t->latency();
-    mTrack = t;
-    return NO_ERROR;
-}
-
-status_t VideoEditorPlayer::VeAudioOutput::start() {
-
-    ALOGV("start");
-    if (mTrack != 0) {
-        mTrack->setVolume(mLeftVolume, mRightVolume);
-        status_t status = mTrack->start();
-        if (status == NO_ERROR) {
-            mTrack->getPosition(&mNumFramesWritten);
-        }
-        return status;
-    }
-    return NO_INIT;
-}
-
-void VideoEditorPlayer::VeAudioOutput::snoopWrite(
-    const void* buffer, size_t size) {
-    // Visualization buffers not supported
-    return;
-
-}
-
-ssize_t VideoEditorPlayer::VeAudioOutput::write(
-     const void* buffer, size_t size) {
-
-    LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
-
-    //ALOGV("write(%p, %u)", buffer, size);
-    if (mTrack != 0) {
-        snoopWrite(buffer, size);
-        ssize_t ret = mTrack->write(buffer, size);
-        mNumFramesWritten += ret / 4; // assume 16 bit stereo
-        return ret;
-    }
-    return NO_INIT;
-}
-
-void VideoEditorPlayer::VeAudioOutput::stop() {
-
-    ALOGV("stop");
-    if (mTrack != 0) mTrack->stop();
-}
-
-void VideoEditorPlayer::VeAudioOutput::flush() {
-
-    ALOGV("flush");
-    if (mTrack != 0) mTrack->flush();
-}
-
-void VideoEditorPlayer::VeAudioOutput::pause() {
-
-    ALOGV("VeAudioOutput::pause");
-    if (mTrack != 0) mTrack->pause();
-}
-
-void VideoEditorPlayer::VeAudioOutput::close() {
-
-    ALOGV("close");
-    mTrack.clear();
-}
-
-void VideoEditorPlayer::VeAudioOutput::setVolume(float left, float right) {
-
-    ALOGV("setVolume(%f, %f)", left, right);
-    mLeftVolume = left;
-    mRightVolume = right;
-    if (mTrack != 0) {
-        mTrack->setVolume(left, right);
-    }
-}
-
-// static
-void VideoEditorPlayer::VeAudioOutput::CallbackWrapper(
-        int event, void *cookie, void *info) {
-    //ALOGV("VeAudioOutput::callbackwrapper");
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
-    }
-
-    VeAudioOutput *me = (VeAudioOutput *)cookie;
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-
-    size_t actualSize = (*me->mCallback)(
-            me, buffer->raw, buffer->size, me->mCallbackCookie,
-            MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER);
-
-    buffer->size = actualSize;
-
-    if (actualSize > 0) {
-        me->snoopWrite(buffer->raw, actualSize);
-    }
-}
-
-status_t VideoEditorPlayer::VeAudioOutput::dump(int fd, const Vector<String16>& args) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    result.append(" VeAudioOutput\n");
-    snprintf(buffer, SIZE-1, "  stream type(%d), left - right volume(%f, %f)\n",
-            mStreamType, mLeftVolume, mRightVolume);
-    result.append(buffer);
-    snprintf(buffer, SIZE-1, "  msec per frame(%f), latency (%d)\n",
-            mMsecsPerFrame, mLatency);
-    result.append(buffer);
-    ::write(fd, result.string(), result.size());
-    if (mTrack != 0) {
-        mTrack->dump(fd, args);
-    }
-    return NO_ERROR;
-}
-
-int VideoEditorPlayer::VeAudioOutput::getSessionId() const {
-
-    return mSessionId;
-}
-
-uint32_t VideoEditorPlayer::VeAudioOutput::getSampleRate() const {
-    if (mMsecsPerFrame == 0) {
-        return 0;
-    }
-    return (uint32_t)(1.e3 / mMsecsPerFrame);
-}
-
-}  // namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
deleted file mode 100755
index b8c1254..0000000
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_VIDEOEDITOR_PLAYER_H
-#define ANDROID_VIDEOEDITOR_PLAYER_H
-
-#include <media/MediaPlayerInterface.h>
-#include <media/AudioTrack.h>
-#include "M4xVSS_API.h"
-#include "VideoEditorMain.h"
-#include "VideoEditorTools.h"
-#include "VideoEditorAudioPlayer.h"
-#include "NativeWindowRenderer.h"
-
-namespace android {
-
-struct PreviewPlayer;
-
-class VideoEditorPlayer : public MediaPlayerInterface {
-    public:
-    class VeAudioOutput: public MediaPlayerBase::AudioSink
-    {
-    public:
-                                VeAudioOutput();
-        virtual                 ~VeAudioOutput();
-
-        virtual bool            ready() const { return mTrack != NULL; }
-        virtual bool            realtime() const { return true; }
-        virtual ssize_t         bufferSize() const;
-        virtual ssize_t         frameCount() const;
-        virtual ssize_t         channelCount() const;
-        virtual ssize_t         frameSize() const;
-        virtual uint32_t        latency() const;
-        virtual float           msecsPerFrame() const;
-        virtual status_t        getPosition(uint32_t *position) const;
-        virtual status_t        getFramesWritten(uint32_t*) const;
-        virtual int             getSessionId() const;
-        virtual uint32_t        getSampleRate() const;
-
-        virtual status_t        open(
-                uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
-                audio_format_t format, int bufferCount,
-                AudioCallback cb, void *cookie, audio_output_flags_t flags,
-                const audio_offload_info_t *offloadInfo);
-
-        virtual status_t        start();
-        virtual ssize_t         write(const void* buffer, size_t size);
-        virtual void            stop();
-        virtual void            flush();
-        virtual void            pause();
-        virtual void            close();
-        void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
-        virtual audio_stream_type_t getAudioStreamType() const { return mStreamType; }
-                void            setVolume(float left, float right);
-        virtual status_t        dump(int fd,const Vector<String16>& args) const;
-
-        static bool             isOnEmulator();
-        static int              getMinBufferCount();
-    private:
-        static void             setMinBufferCount();
-        static void             CallbackWrapper(
-                int event, void *me, void *info);
-
-        sp<AudioTrack>          mTrack;
-        AudioCallback           mCallback;
-        void *                  mCallbackCookie;
-        audio_stream_type_t     mStreamType;
-        float                   mLeftVolume;
-        float                   mRightVolume;
-        float                   mMsecsPerFrame;
-        uint32_t                mLatency;
-        int                     mSessionId;
-        static bool             mIsOnEmulator;
-        static int              mMinBufferCount; // 12 for emulator; otherwise 4
-
-        public:
-        uint32_t                mNumFramesWritten;
-        void                    snoopWrite(const void*, size_t);
-    };
-
-public:
-    VideoEditorPlayer(NativeWindowRenderer* renderer);
-    virtual ~VideoEditorPlayer();
-
-    virtual status_t initCheck();
-
-    virtual status_t setDataSource(
-            const char *url, const KeyedVector<String8, String8> *headers);
-
-    virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
-    virtual status_t setVideoSurface(const sp<Surface> &surface);
-    virtual status_t setVideoSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer);
-    virtual status_t prepare();
-    virtual status_t prepareAsync();
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t pause();
-    virtual bool isPlaying();
-    virtual status_t seekTo(int msec);
-    virtual status_t getCurrentPosition(int *msec);
-    virtual status_t getDuration(int *msec);
-    virtual status_t reset();
-    virtual status_t setLooping(int loop);
-    virtual player_type playerType();
-    virtual status_t invoke(const Parcel &request, Parcel *reply);
-    virtual void setAudioSink(const sp<AudioSink> &audioSink);
-    virtual void acquireLock();
-    virtual void releaseLock();
-    virtual status_t setParameter(int key, const Parcel &request);
-    virtual status_t getParameter(int key, Parcel *reply);
-
-    virtual status_t getMetadata(
-                        const media::Metadata::Filter& ids, Parcel *records);
-
-    virtual status_t loadEffectsSettings(
-                         M4VSS3GPP_EffectSettings* pEffectSettings, int nEffects);
-
-    virtual status_t loadAudioMixSettings(
-                         M4xVSS_AudioMixingSettings* pAudioMixSettings);
-
-    virtual status_t setAudioMixPCMFileHandle(
-                         M4OSA_Context pAudioMixPCMFileHandle);
-
-    virtual status_t setAudioMixStoryBoardParam(
-                         M4OSA_UInt32 x, M4OSA_UInt32 y, M4OSA_UInt32 z);
-
-    virtual status_t setPlaybackBeginTime(uint32_t msec);
-    virtual status_t setPlaybackEndTime(uint32_t msec);
-    virtual status_t setStoryboardStartTime(uint32_t msec);
-    virtual status_t setProgressCallbackInterval(uint32_t cbInterval);
-
-    virtual status_t setMediaRenderingMode(M4xVSS_MediaRendering mode,
-                          M4VIDEOEDITING_VideoFrameSize outputVideoSize);
-
-    virtual status_t resetJniCallbackTimeStamp();
-    virtual status_t setImageClipProperties(uint32_t width, uint32_t height);
-    virtual status_t readFirstVideoFrame();
-    virtual status_t getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs);
-
-    status_t setAudioPlayer(VideoEditorAudioPlayer *audioPlayer);
-private:
-    PreviewPlayer       *mPlayer;
-    sp<VeAudioOutput>    mVeAudioSink;
-
-    VideoEditorPlayer(const VideoEditorPlayer &);
-    VideoEditorPlayer &operator=(const VideoEditorPlayer &);
-};
-
-}  // namespace android
-
-#endif  // ANDROID_VIDEOEDITOR_PLAYER_H
diff --git a/libvideoeditor/lvpp/VideoEditorPreviewController.cpp b/libvideoeditor/lvpp/VideoEditorPreviewController.cpp
deleted file mode 100755
index c3cd3d0..0000000
--- a/libvideoeditor/lvpp/VideoEditorPreviewController.cpp
+++ /dev/null
@@ -1,1467 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "PreviewController"
-#include <utils/Log.h>
-
-#include <gui/Surface.h>
-
-#include "VideoEditorAudioPlayer.h"
-#include "PreviewRenderer.h"
-#include "M4OSA_Semaphore.h"
-#include "M4OSA_Thread.h"
-#include "VideoEditorPreviewController.h"
-
-namespace android {
-
-
-VideoEditorPreviewController::VideoEditorPreviewController()
-    : mCurrentPlayer(0),
-      mThreadContext(NULL),
-      mPlayerState(VePlayerIdle),
-      mPrepareReqest(M4OSA_FALSE),
-      mClipList(NULL),
-      mNumberClipsInStoryBoard(0),
-      mNumberClipsToPreview(0),
-      mStartingClipIndex(0),
-      mPreviewLooping(M4OSA_FALSE),
-      mCallBackAfterFrameCnt(0),
-      mEffectsSettings(NULL),
-      mNumberEffects(0),
-      mCurrentClipNumber(-1),
-      mClipTotalDuration(0),
-      mCurrentVideoEffect(VIDEO_EFFECT_NONE),
-      mBackgroundAudioSetting(NULL),
-      mAudioMixPCMFileHandle(NULL),
-      mTarget(NULL),
-      mJniCookie(NULL),
-      mJniCallback(NULL),
-      mCurrentPlayedDuration(0),
-      mCurrentClipDuration(0),
-      mVideoStoryBoardTimeMsUptoFirstPreviewClip(0),
-      mOverlayState(OVERLAY_CLEAR),
-      mActivePlayerIndex(0),
-      mOutputVideoWidth(0),
-      mOutputVideoHeight(0),
-      bStopThreadInProgress(false),
-      mSemThreadWait(NULL) {
-    ALOGV("VideoEditorPreviewController");
-    mRenderingMode = M4xVSS_kBlackBorders;
-    mIsFiftiesEffectStarted = false;
-
-    for (int i = 0; i < kTotalNumPlayerInstances; ++i) {
-        mVePlayer[i] = NULL;
-    }
-}
-
-VideoEditorPreviewController::~VideoEditorPreviewController() {
-    ALOGV("~VideoEditorPreviewController");
-    M4OSA_UInt32 i = 0;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    // Stop the thread if its still running
-    if(mThreadContext != NULL) {
-        err = M4OSA_threadSyncStop(mThreadContext);
-        if(err != M4NO_ERROR) {
-            ALOGV("~VideoEditorPreviewController: error 0x%x \
-            in trying to stop thread", err);
-            // Continue even if error
-        }
-
-        err = M4OSA_threadSyncClose(mThreadContext);
-        if(err != M4NO_ERROR) {
-            ALOGE("~VideoEditorPreviewController: error 0x%x \
-            in trying to close thread", (unsigned int) err);
-            // Continue even if error
-        }
-
-        mThreadContext = NULL;
-    }
-
-    for (int playerInst=0; playerInst<kTotalNumPlayerInstances;
-         playerInst++) {
-        if(mVePlayer[playerInst] != NULL) {
-            ALOGV("clearing mVePlayer %d", playerInst);
-            mVePlayer[playerInst].clear();
-        }
-    }
-
-    if(mClipList != NULL) {
-        // Clean up
-        for(i=0;i<mNumberClipsInStoryBoard;i++)
-        {
-            if(mClipList[i]->pFile != NULL) {
-                free(mClipList[i]->pFile);
-                mClipList[i]->pFile = NULL;
-            }
-
-            free(mClipList[i]);
-        }
-        free(mClipList);
-        mClipList = NULL;
-    }
-
-    if(mEffectsSettings) {
-        for(i=0;i<mNumberEffects;i++) {
-            if(mEffectsSettings[i].xVSS.pFramingBuffer != NULL) {
-                free(mEffectsSettings[i].xVSS.pFramingBuffer->pac_data);
-
-                free(mEffectsSettings[i].xVSS.pFramingBuffer);
-
-                mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
-            }
-        }
-        free(mEffectsSettings);
-        mEffectsSettings = NULL;
-    }
-
-    if (mAudioMixPCMFileHandle) {
-        err = M4OSA_fileReadClose (mAudioMixPCMFileHandle);
-        mAudioMixPCMFileHandle = M4OSA_NULL;
-    }
-
-    if (mBackgroundAudioSetting != NULL) {
-        free(mBackgroundAudioSetting);
-        mBackgroundAudioSetting = NULL;
-    }
-
-    if(mTarget != NULL) {
-        delete mTarget;
-        mTarget = NULL;
-    }
-
-    mOverlayState = OVERLAY_CLEAR;
-
-    ALOGV("~VideoEditorPreviewController returns");
-}
-
-M4OSA_ERR VideoEditorPreviewController::loadEditSettings(
-    M4VSS3GPP_EditSettings* pSettings,M4xVSS_AudioMixingSettings* bgmSettings) {
-
-    M4OSA_UInt32 i = 0, iClipDuration = 0, rgbSize = 0;
-    M4VIFI_UInt8 *tmp = NULL;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("loadEditSettings");
-    ALOGV("loadEditSettings Channels = %d, sampling Freq %d",
-          bgmSettings->uiNbChannels, bgmSettings->uiSamplingFrequency  );
-          bgmSettings->uiSamplingFrequency = 32000;
-
-    ALOGV("loadEditSettings Channels = %d, sampling Freq %d",
-          bgmSettings->uiNbChannels, bgmSettings->uiSamplingFrequency  );
-    Mutex::Autolock autoLock(mLock);
-
-    // Clean up any previous Edit settings before loading new ones
-    mCurrentVideoEffect = VIDEO_EFFECT_NONE;
-
-    if(mAudioMixPCMFileHandle) {
-        err = M4OSA_fileReadClose (mAudioMixPCMFileHandle);
-        mAudioMixPCMFileHandle = M4OSA_NULL;
-    }
-
-    if(mBackgroundAudioSetting != NULL) {
-        free(mBackgroundAudioSetting);
-        mBackgroundAudioSetting = NULL;
-    }
-
-    if(mClipList != NULL) {
-        // Clean up
-        for(i=0;i<mNumberClipsInStoryBoard;i++)
-        {
-            if(mClipList[i]->pFile != NULL) {
-                free(mClipList[i]->pFile);
-                mClipList[i]->pFile = NULL;
-            }
-
-            free(mClipList[i]);
-        }
-        free(mClipList);
-        mClipList = NULL;
-    }
-
-    if(mEffectsSettings) {
-        for(i=0;i<mNumberEffects;i++) {
-            if(mEffectsSettings[i].xVSS.pFramingBuffer != NULL) {
-                free(mEffectsSettings[i].xVSS.pFramingBuffer->pac_data);
-
-                free(mEffectsSettings[i].xVSS.pFramingBuffer);
-
-                mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
-            }
-        }
-        free(mEffectsSettings);
-        mEffectsSettings = NULL;
-    }
-
-    if(mClipList == NULL) {
-        mNumberClipsInStoryBoard = pSettings->uiClipNumber;
-        ALOGV("loadEditSettings: # of Clips = %d", mNumberClipsInStoryBoard);
-
-        mClipList = (M4VSS3GPP_ClipSettings**)M4OSA_32bitAlignedMalloc(
-         sizeof(M4VSS3GPP_ClipSettings*)*pSettings->uiClipNumber, M4VS,
-         (M4OSA_Char*)"LvPP, copy of pClipList");
-
-        if(NULL == mClipList) {
-            ALOGE("loadEditSettings: Malloc error");
-            return M4ERR_ALLOC;
-        }
-        memset((void *)mClipList,0,
-         sizeof(M4VSS3GPP_ClipSettings*)*pSettings->uiClipNumber);
-
-        for(i=0;i<pSettings->uiClipNumber;i++) {
-
-            // Allocate current clip
-            mClipList[i] =
-             (M4VSS3GPP_ClipSettings*)M4OSA_32bitAlignedMalloc(
-              sizeof(M4VSS3GPP_ClipSettings),M4VS,(M4OSA_Char*)"clip settings");
-
-            if(mClipList[i] == NULL) {
-
-                ALOGE("loadEditSettings: Allocation error for mClipList[%d]", (int)i);
-                return M4ERR_ALLOC;
-            }
-            // Copy plain structure
-            memcpy((void *)mClipList[i],
-             (void *)pSettings->pClipList[i],
-             sizeof(M4VSS3GPP_ClipSettings));
-
-            if(NULL != pSettings->pClipList[i]->pFile) {
-                mClipList[i]->pFile = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(
-                pSettings->pClipList[i]->filePathSize, M4VS,
-                (M4OSA_Char*)"pClipSettingsDest->pFile");
-
-                if(NULL == mClipList[i]->pFile)
-                {
-                    ALOGE("loadEditSettings : ERROR allocating filename");
-                    return M4ERR_ALLOC;
-                }
-
-                memcpy((void *)mClipList[i]->pFile,
-                 (void *)pSettings->pClipList[i]->pFile,
-                 pSettings->pClipList[i]->filePathSize);
-            }
-            else {
-                ALOGE("NULL file path");
-                return M4ERR_PARAMETER;
-            }
-
-            // Calculate total duration of all clips
-            iClipDuration = pSettings->pClipList[i]->uiEndCutTime -
-             pSettings->pClipList[i]->uiBeginCutTime;
-
-            mClipTotalDuration = mClipTotalDuration+iClipDuration;
-        }
-    }
-
-    if(mEffectsSettings == NULL) {
-        mNumberEffects = pSettings->nbEffects;
-        ALOGV("loadEditSettings: mNumberEffects = %d", mNumberEffects);
-
-        if(mNumberEffects != 0) {
-            mEffectsSettings = (M4VSS3GPP_EffectSettings*)M4OSA_32bitAlignedMalloc(
-             mNumberEffects*sizeof(M4VSS3GPP_EffectSettings),
-             M4VS, (M4OSA_Char*)"effects settings");
-
-            if(mEffectsSettings == NULL) {
-                ALOGE("loadEffectsSettings: Allocation error");
-                return M4ERR_ALLOC;
-            }
-
-            memset((void *)mEffectsSettings,0,
-             mNumberEffects*sizeof(M4VSS3GPP_EffectSettings));
-
-            for(i=0;i<mNumberEffects;i++) {
-
-                mEffectsSettings[i].xVSS.pFramingFilePath = NULL;
-                mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
-                mEffectsSettings[i].xVSS.pTextBuffer = NULL;
-
-                memcpy((void *)&(mEffectsSettings[i]),
-                 (void *)&(pSettings->Effects[i]),
-                 sizeof(M4VSS3GPP_EffectSettings));
-
-                if(pSettings->Effects[i].VideoEffectType ==
-                 (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
-                    // Allocate the pFraming RGB buffer
-                    mEffectsSettings[i].xVSS.pFramingBuffer =
-                    (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(sizeof(M4VIFI_ImagePlane),
-                     M4VS, (M4OSA_Char*)"lvpp framing buffer");
-
-                    if(mEffectsSettings[i].xVSS.pFramingBuffer == NULL) {
-                        ALOGE("loadEffectsSettings:Alloc error for pFramingBuf");
-                        free(mEffectsSettings);
-                        mEffectsSettings = NULL;
-                        return M4ERR_ALLOC;
-                    }
-
-                    // Allocate the pac_data (RGB)
-                    if(pSettings->Effects[i].xVSS.rgbType == M4VSS3GPP_kRGB565){
-                        rgbSize =
-                         pSettings->Effects[i].xVSS.pFramingBuffer->u_width *
-                         pSettings->Effects[i].xVSS.pFramingBuffer->u_height*2;
-                    }
-                    else if(
-                     pSettings->Effects[i].xVSS.rgbType == M4VSS3GPP_kRGB888) {
-                        rgbSize =
-                         pSettings->Effects[i].xVSS.pFramingBuffer->u_width *
-                         pSettings->Effects[i].xVSS.pFramingBuffer->u_height*3;
-                    }
-                    else {
-                        ALOGE("loadEffectsSettings: wrong RGB type");
-                        free(mEffectsSettings);
-                        mEffectsSettings = NULL;
-                        return M4ERR_PARAMETER;
-                    }
-
-                    tmp = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(rgbSize, M4VS,
-                     (M4OSA_Char*)"framing buffer pac_data");
-
-                    if(tmp == NULL) {
-                        ALOGE("loadEffectsSettings:Alloc error pFramingBuf pac");
-                        free(mEffectsSettings);
-                        mEffectsSettings = NULL;
-                        free(mEffectsSettings[i].xVSS.pFramingBuffer);
-
-                        mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
-                        return M4ERR_ALLOC;
-                    }
-                    /* Initialize the pFramingBuffer*/
-                    mEffectsSettings[i].xVSS.pFramingBuffer->pac_data = tmp;
-                    mEffectsSettings[i].xVSS.pFramingBuffer->u_height =
-                     pSettings->Effects[i].xVSS.pFramingBuffer->u_height;
-
-                    mEffectsSettings[i].xVSS.pFramingBuffer->u_width =
-                     pSettings->Effects[i].xVSS.pFramingBuffer->u_width;
-
-                    mEffectsSettings[i].xVSS.pFramingBuffer->u_stride =
-                     pSettings->Effects[i].xVSS.pFramingBuffer->u_stride;
-
-                    mEffectsSettings[i].xVSS.pFramingBuffer->u_topleft =
-                     pSettings->Effects[i].xVSS.pFramingBuffer->u_topleft;
-
-                    mEffectsSettings[i].xVSS.uialphaBlendingStart =
-                     pSettings->Effects[i].xVSS.uialphaBlendingStart;
-
-                    mEffectsSettings[i].xVSS.uialphaBlendingMiddle =
-                     pSettings->Effects[i].xVSS.uialphaBlendingMiddle;
-
-                    mEffectsSettings[i].xVSS.uialphaBlendingEnd =
-                     pSettings->Effects[i].xVSS.uialphaBlendingEnd;
-
-                    mEffectsSettings[i].xVSS.uialphaBlendingFadeInTime =
-                     pSettings->Effects[i].xVSS.uialphaBlendingFadeInTime;
-                    mEffectsSettings[i].xVSS.uialphaBlendingFadeOutTime =
-                     pSettings->Effects[i].xVSS.uialphaBlendingFadeOutTime;
-
-                    // Copy the pFraming data
-                    memcpy((void *)
-                    mEffectsSettings[i].xVSS.pFramingBuffer->pac_data,
-                    (void *)pSettings->Effects[i].xVSS.pFramingBuffer->pac_data,
-                    rgbSize);
-
-                    mEffectsSettings[i].xVSS.rgbType =
-                     pSettings->Effects[i].xVSS.rgbType;
-                }
-            }
-        }
-    }
-
-    if (mBackgroundAudioSetting == NULL) {
-
-        mBackgroundAudioSetting = (M4xVSS_AudioMixingSettings*)M4OSA_32bitAlignedMalloc(
-        sizeof(M4xVSS_AudioMixingSettings), M4VS,
-        (M4OSA_Char*)"LvPP, copy of bgmSettings");
-
-        if(NULL == mBackgroundAudioSetting) {
-            ALOGE("loadEditSettings: mBackgroundAudioSetting Malloc failed");
-            return M4ERR_ALLOC;
-        }
-
-        memset((void *)mBackgroundAudioSetting, 0,sizeof(M4xVSS_AudioMixingSettings*));
-        memcpy((void *)mBackgroundAudioSetting, (void *)bgmSettings, sizeof(M4xVSS_AudioMixingSettings));
-
-        if ( mBackgroundAudioSetting->pFile != M4OSA_NULL ) {
-
-            mBackgroundAudioSetting->pFile = (M4OSA_Void*) bgmSettings->pPCMFilePath;
-            mBackgroundAudioSetting->uiNbChannels = 2;
-            mBackgroundAudioSetting->uiSamplingFrequency = 32000;
-        }
-
-        // Open the BG file
-        if ( mBackgroundAudioSetting->pFile != M4OSA_NULL ) {
-            err = M4OSA_fileReadOpen(&mAudioMixPCMFileHandle,
-             mBackgroundAudioSetting->pFile, M4OSA_kFileRead);
-
-            if (err != M4NO_ERROR) {
-                ALOGE("loadEditSettings: mBackgroundAudio PCM File open failed");
-                return M4ERR_PARAMETER;
-            }
-        }
-    }
-
-    mOutputVideoSize = pSettings->xVSS.outputVideoSize;
-    mFrameStr.pBuffer = M4OSA_NULL;
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR VideoEditorPreviewController::setSurface(const sp<Surface> &surface) {
-    ALOGV("setSurface");
-    Mutex::Autolock autoLock(mLock);
-
-    mSurface = surface;
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR VideoEditorPreviewController::startPreview(
-    M4OSA_UInt32 fromMS, M4OSA_Int32 toMs, M4OSA_UInt16 callBackAfterFrameCount,
-    M4OSA_Bool loop) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 i = 0, iIncrementedDuration = 0;
-    ALOGV("startPreview");
-
-    if(fromMS > (M4OSA_UInt32)toMs) {
-        ALOGE("startPreview: fromMS > toMs");
-        return M4ERR_PARAMETER;
-    }
-
-    if(toMs == 0) {
-        ALOGE("startPreview: toMs is 0");
-        return M4ERR_PARAMETER;
-    }
-
-    // If already started, then stop preview first
-    for(int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
-        if(mVePlayer[playerInst] != NULL) {
-            ALOGV("startPreview: stopping previously started preview playback");
-            stopPreview();
-            break;
-        }
-    }
-
-    // If renderPreview was called previously, then delete Renderer object first
-    if(mTarget != NULL) {
-        ALOGV("startPreview: delete previous PreviewRenderer");
-        delete mTarget;
-        mTarget = NULL;
-    }
-
-    // Create Audio player to be used for entire
-    // storyboard duration
-    mVEAudioSink = new VideoEditorPlayer::VeAudioOutput();
-    mVEAudioPlayer = new VideoEditorAudioPlayer(mVEAudioSink);
-    mVEAudioPlayer->setAudioMixSettings(mBackgroundAudioSetting);
-    mVEAudioPlayer->setAudioMixPCMFileHandle(mAudioMixPCMFileHandle);
-
-    // Create Video Renderer to be used for the entire storyboard duration.
-    uint32_t width, height;
-    getVideoSizeByResolution(mOutputVideoSize, &width, &height);
-    mNativeWindowRenderer = new NativeWindowRenderer(mSurface, width, height);
-
-    ALOGV("startPreview: loop = %d", loop);
-    mPreviewLooping = loop;
-
-    ALOGV("startPreview: callBackAfterFrameCount = %d", callBackAfterFrameCount);
-    mCallBackAfterFrameCnt = callBackAfterFrameCount;
-
-    for (int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
-        mVePlayer[playerInst] = new VideoEditorPlayer(mNativeWindowRenderer);
-        if(mVePlayer[playerInst] == NULL) {
-            ALOGE("startPreview:Error creating VideoEditorPlayer %d",playerInst);
-            return M4ERR_ALLOC;
-        }
-        ALOGV("startPreview: object created");
-
-        mVePlayer[playerInst]->setNotifyCallback(this,(notify_callback_f)notify);
-        ALOGV("startPreview: notify callback set");
-
-        mVePlayer[playerInst]->loadEffectsSettings(mEffectsSettings,
-         mNumberEffects);
-        ALOGV("startPreview: effects settings loaded");
-
-        mVePlayer[playerInst]->loadAudioMixSettings(mBackgroundAudioSetting);
-        ALOGV("startPreview: AudioMixSettings settings loaded");
-
-        mVePlayer[playerInst]->setAudioMixPCMFileHandle(mAudioMixPCMFileHandle);
-        ALOGV("startPreview: AudioMixPCMFileHandle set");
-
-        mVePlayer[playerInst]->setProgressCallbackInterval(
-         mCallBackAfterFrameCnt);
-        ALOGV("startPreview: setProgressCallBackInterval");
-    }
-
-    mPlayerState = VePlayerIdle;
-    mPrepareReqest = M4OSA_FALSE;
-
-    if(fromMS == 0) {
-        mCurrentClipNumber = -1;
-        // Save original value
-        mFirstPreviewClipBeginTime = mClipList[0]->uiBeginCutTime;
-        mVideoStoryBoardTimeMsUptoFirstPreviewClip = 0;
-    }
-    else {
-        ALOGV("startPreview: fromMS=%d", fromMS);
-        if(fromMS >= mClipTotalDuration) {
-            ALOGE("startPreview: fromMS >= mClipTotalDuration");
-            return M4ERR_PARAMETER;
-        }
-        for(i=0;i<mNumberClipsInStoryBoard;i++) {
-            if(fromMS < (iIncrementedDuration + (mClipList[i]->uiEndCutTime -
-             mClipList[i]->uiBeginCutTime))) {
-                // Set to 1 index below,
-                // as threadProcess first increments the clip index
-                // and then processes clip in thread loop
-                mCurrentClipNumber = i-1;
-                ALOGD("startPreview:mCurrentClipNumber = %d fromMS=%d",i,fromMS);
-
-                // Save original value
-                mFirstPreviewClipBeginTime = mClipList[i]->uiBeginCutTime;
-
-                // Set correct begin time to start playback
-                if((fromMS+mClipList[i]->uiBeginCutTime) >
-                (iIncrementedDuration+mClipList[i]->uiBeginCutTime)) {
-
-                    mClipList[i]->uiBeginCutTime =
-                     mClipList[i]->uiBeginCutTime +
-                     (fromMS - iIncrementedDuration);
-                }
-                break;
-            }
-            else {
-                iIncrementedDuration = iIncrementedDuration +
-                 (mClipList[i]->uiEndCutTime - mClipList[i]->uiBeginCutTime);
-            }
-        }
-        mVideoStoryBoardTimeMsUptoFirstPreviewClip = iIncrementedDuration;
-    }
-
-    for (int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
-        mVePlayer[playerInst]->setAudioMixStoryBoardParam(fromMS,
-         mFirstPreviewClipBeginTime,
-         mClipList[i]->ClipProperties.uiClipAudioVolumePercentage);
-
-        ALOGV("startPreview:setAudioMixStoryBoardSkimTimeStamp set %d cuttime \
-         %d", fromMS, mFirstPreviewClipBeginTime);
-    }
-
-    mStartingClipIndex = mCurrentClipNumber+1;
-
-    // Start playing with player instance 0
-    mCurrentPlayer = 0;
-    mActivePlayerIndex = 0;
-
-    if(toMs == -1) {
-        ALOGV("startPreview: Preview till end of storyboard");
-        mNumberClipsToPreview = mNumberClipsInStoryBoard;
-        // Save original value
-        mLastPreviewClipEndTime =
-         mClipList[mNumberClipsToPreview-1]->uiEndCutTime;
-    }
-    else {
-        ALOGV("startPreview: toMs=%d", toMs);
-        if((M4OSA_UInt32)toMs > mClipTotalDuration) {
-            ALOGE("startPreview: toMs > mClipTotalDuration");
-            return M4ERR_PARAMETER;
-        }
-
-        iIncrementedDuration = 0;
-
-        for(i=0;i<mNumberClipsInStoryBoard;i++) {
-            if((M4OSA_UInt32)toMs <= (iIncrementedDuration +
-             (mClipList[i]->uiEndCutTime - mClipList[i]->uiBeginCutTime))) {
-                // Save original value
-                mLastPreviewClipEndTime = mClipList[i]->uiEndCutTime;
-                // Set the end cut time of clip index i to toMs
-                mClipList[i]->uiEndCutTime = toMs;
-
-                // Number of clips to be previewed is from index 0 to i
-                // increment by 1 as i starts from 0
-                mNumberClipsToPreview = i+1;
-                break;
-            }
-            else {
-                iIncrementedDuration = iIncrementedDuration +
-                 (mClipList[i]->uiEndCutTime - mClipList[i]->uiBeginCutTime);
-            }
-        }
-    }
-
-    // Open the thread semaphore
-    M4OSA_semaphoreOpen(&mSemThreadWait, 1);
-
-    // Open the preview process thread
-    err = M4OSA_threadSyncOpen(&mThreadContext, (M4OSA_ThreadDoIt)threadProc);
-    if (M4NO_ERROR != err) {
-        ALOGE("VideoEditorPreviewController:M4OSA_threadSyncOpen error %d", (int) err);
-        return err;
-    }
-
-    // Set the stacksize
-    err = M4OSA_threadSyncSetOption(mThreadContext, M4OSA_ThreadStackSize,
-     (M4OSA_DataOption) kPreviewThreadStackSize);
-
-    if (M4NO_ERROR != err) {
-        ALOGE("VideoEditorPreviewController: threadSyncSetOption error %d", (int) err);
-        M4OSA_threadSyncClose(mThreadContext);
-        mThreadContext = NULL;
-        return err;
-    }
-
-     // Start the thread
-     err = M4OSA_threadSyncStart(mThreadContext, (M4OSA_Void*)this);
-     if (M4NO_ERROR != err) {
-        ALOGE("VideoEditorPreviewController: threadSyncStart error %d", (int) err);
-        M4OSA_threadSyncClose(mThreadContext);
-        mThreadContext = NULL;
-        return err;
-    }
-    bStopThreadInProgress = false;
-
-    ALOGV("startPreview: process thread started");
-    return M4NO_ERROR;
-}
-
-M4OSA_UInt32 VideoEditorPreviewController::stopPreview() {
-    M4OSA_ERR err = M4NO_ERROR;
-    uint32_t lastRenderedFrameTimeMs = 0;
-    ALOGV("stopPreview");
-
-    // Stop the thread
-    if(mThreadContext != NULL) {
-        bStopThreadInProgress = true;
-        {
-            Mutex::Autolock autoLock(mLockSem);
-            if (mSemThreadWait != NULL) {
-                err = M4OSA_semaphorePost(mSemThreadWait);
-            }
-        }
-
-        err = M4OSA_threadSyncStop(mThreadContext);
-        if(err != M4NO_ERROR) {
-            ALOGV("stopPreview: error 0x%x in trying to stop thread", err);
-            // Continue even if error
-        }
-
-        err = M4OSA_threadSyncClose(mThreadContext);
-        if(err != M4NO_ERROR) {
-            ALOGE("stopPreview: error 0x%x in trying to close thread", (unsigned int)err);
-            // Continue even if error
-        }
-
-        mThreadContext = NULL;
-    }
-
-    // Close the semaphore first
-    {
-        Mutex::Autolock autoLock(mLockSem);
-        if(mSemThreadWait != NULL) {
-            err = M4OSA_semaphoreClose(mSemThreadWait);
-            ALOGV("stopPreview: close semaphore returns 0x%x", err);
-            mSemThreadWait = NULL;
-        }
-    }
-
-    for (int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
-        if(mVePlayer[playerInst] != NULL) {
-            if(mVePlayer[playerInst]->isPlaying()) {
-                ALOGV("stop the player first");
-                mVePlayer[playerInst]->stop();
-            }
-            if (playerInst == mActivePlayerIndex) {
-                // Return the last rendered frame time stamp
-                mVePlayer[mActivePlayerIndex]->getLastRenderedTimeMs(&lastRenderedFrameTimeMs);
-            }
-
-            //This is used to syncronize onStreamDone() in PreviewPlayer and
-            //stopPreview() in PreviewController
-            sp<VideoEditorPlayer> temp = mVePlayer[playerInst];
-            temp->acquireLock();
-            ALOGV("stopPreview: clearing mVePlayer");
-            mVePlayer[playerInst].clear();
-            mVePlayer[playerInst] = NULL;
-            temp->releaseLock();
-        }
-    }
-    ALOGV("stopPreview: clear audioSink and audioPlayer");
-    mVEAudioSink.clear();
-    if (mVEAudioPlayer) {
-        delete mVEAudioPlayer;
-        mVEAudioPlayer = NULL;
-    }
-
-    delete mNativeWindowRenderer;
-    mNativeWindowRenderer = NULL;
-
-    // If image file playing, then free the buffer pointer
-    if(mFrameStr.pBuffer != M4OSA_NULL) {
-        free(mFrameStr.pBuffer);
-        mFrameStr.pBuffer = M4OSA_NULL;
-    }
-
-    // Reset original begin cuttime of first previewed clip*/
-    mClipList[mStartingClipIndex]->uiBeginCutTime = mFirstPreviewClipBeginTime;
-    // Reset original end cuttime of last previewed clip*/
-    mClipList[mNumberClipsToPreview-1]->uiEndCutTime = mLastPreviewClipEndTime;
-
-    mPlayerState = VePlayerIdle;
-    mPrepareReqest = M4OSA_FALSE;
-
-    mCurrentPlayedDuration = 0;
-    mCurrentClipDuration = 0;
-    mRenderingMode = M4xVSS_kBlackBorders;
-    mOutputVideoWidth = 0;
-    mOutputVideoHeight = 0;
-
-    ALOGV("stopPreview() lastRenderedFrameTimeMs %ld", lastRenderedFrameTimeMs);
-    return lastRenderedFrameTimeMs;
-}
-
-M4OSA_ERR VideoEditorPreviewController::clearSurface(
-    const sp<Surface> &surface, VideoEditor_renderPreviewFrameStr* pFrameInfo) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditor_renderPreviewFrameStr* pFrameStr = pFrameInfo;
-    M4OSA_UInt32 outputBufferWidth =0, outputBufferHeight=0;
-    M4VIFI_ImagePlane planeOut[3];
-    ALOGV("Inside preview clear frame");
-
-    Mutex::Autolock autoLock(mLock);
-
-    // Delete previous renderer instance
-    if(mTarget != NULL) {
-        delete mTarget;
-        mTarget = NULL;
-    }
-
-    outputBufferWidth = pFrameStr->uiFrameWidth;
-    outputBufferHeight = pFrameStr->uiFrameHeight;
-
-    // Initialize the renderer
-    if(mTarget == NULL) {
-
-        mTarget = PreviewRenderer::CreatePreviewRenderer(
-            surface,
-            outputBufferWidth, outputBufferHeight);
-
-        if(mTarget == NULL) {
-            ALOGE("renderPreviewFrame: cannot create PreviewRenderer");
-            return M4ERR_ALLOC;
-        }
-    }
-
-    // Out plane
-    uint8_t* outBuffer;
-    size_t outBufferStride = 0;
-
-    ALOGV("doMediaRendering CALL getBuffer()");
-    mTarget->getBufferYV12(&outBuffer, &outBufferStride);
-
-    // Set the output YUV420 plane to be compatible with YV12 format
-    //In YV12 format, sizes must be even
-    M4OSA_UInt32 yv12PlaneWidth = ((outputBufferWidth +1)>>1)<<1;
-    M4OSA_UInt32 yv12PlaneHeight = ((outputBufferHeight+1)>>1)<<1;
-
-    prepareYV12ImagePlane(planeOut, yv12PlaneWidth, yv12PlaneHeight,
-     (M4OSA_UInt32)outBufferStride, (M4VIFI_UInt8 *)outBuffer);
-
-    /* Fill the surface with black frame */
-    memset((void *)planeOut[0].pac_data,0x00,planeOut[0].u_width *
-                            planeOut[0].u_height * 1.5);
-    memset((void *)planeOut[1].pac_data,128,planeOut[1].u_width *
-                            planeOut[1].u_height);
-    memset((void *)planeOut[2].pac_data,128,planeOut[2].u_width *
-                             planeOut[2].u_height);
-
-    mTarget->renderYV12();
-    return err;
-}
-
-M4OSA_ERR VideoEditorPreviewController::renderPreviewFrame(
-            const sp<Surface> &surface,
-            VideoEditor_renderPreviewFrameStr* pFrameInfo,
-            VideoEditorCurretEditInfo *pCurrEditInfo) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 i = 0, iIncrementedDuration = 0, tnTimeMs=0, framesize =0;
-    VideoEditor_renderPreviewFrameStr* pFrameStr = pFrameInfo;
-    M4VIFI_UInt8 *pixelArray = NULL;
-    Mutex::Autolock autoLock(mLock);
-
-    if (pCurrEditInfo != NULL) {
-        pCurrEditInfo->overlaySettingsIndex = -1;
-    }
-    // Delete previous renderer instance
-    if(mTarget != NULL) {
-        delete mTarget;
-        mTarget = NULL;
-    }
-
-    if(mOutputVideoWidth == 0) {
-        mOutputVideoWidth = pFrameStr->uiFrameWidth;
-    }
-
-    if(mOutputVideoHeight == 0) {
-        mOutputVideoHeight = pFrameStr->uiFrameHeight;
-    }
-
-    // Initialize the renderer
-    if(mTarget == NULL) {
-         mTarget = PreviewRenderer::CreatePreviewRenderer(
-            surface,
-            mOutputVideoWidth, mOutputVideoHeight);
-
-        if(mTarget == NULL) {
-            ALOGE("renderPreviewFrame: cannot create PreviewRenderer");
-            return M4ERR_ALLOC;
-        }
-    }
-
-    pixelArray = NULL;
-
-    // Apply rotation if required
-    if (pFrameStr->videoRotationDegree != 0) {
-        err = applyVideoRotation((M4OSA_Void *)pFrameStr->pBuffer,
-                  pFrameStr->uiFrameWidth, pFrameStr->uiFrameHeight,
-                  pFrameStr->videoRotationDegree);
-        if (M4NO_ERROR != err) {
-            ALOGE("renderPreviewFrame: cannot rotate video, err 0x%x", (unsigned int)err);
-            delete mTarget;
-            mTarget = NULL;
-            return err;
-        } else {
-           // Video rotation done.
-           // Swap width and height if 90 or 270 degrees
-           if (pFrameStr->videoRotationDegree != 180) {
-               int32_t temp = pFrameStr->uiFrameWidth;
-               pFrameStr->uiFrameWidth = pFrameStr->uiFrameHeight;
-               pFrameStr->uiFrameHeight = temp;
-           }
-        }
-    }
-    // Postprocessing (apply video effect)
-    if(pFrameStr->bApplyEffect == M4OSA_TRUE) {
-
-        for(i=0;i<mNumberEffects;i++) {
-            // First check if effect starttime matches the clip being previewed
-            if((mEffectsSettings[i].uiStartTime < pFrameStr->clipBeginCutTime)
-             ||(mEffectsSettings[i].uiStartTime >= pFrameStr->clipEndCutTime)) {
-                // This effect doesn't belong to this clip, check next one
-                continue;
-            }
-            if((mEffectsSettings[i].uiStartTime <= pFrameStr->timeMs) &&
-            ((mEffectsSettings[i].uiStartTime+mEffectsSettings[i].uiDuration) >=
-             pFrameStr->timeMs) && (mEffectsSettings[i].uiDuration != 0)) {
-                setVideoEffectType(mEffectsSettings[i].VideoEffectType, TRUE);
-            }
-            else {
-                setVideoEffectType(mEffectsSettings[i].VideoEffectType, FALSE);
-            }
-        }
-
-        //Provide the overlay Update indication when there is an overlay effect
-        if (mCurrentVideoEffect & VIDEO_EFFECT_FRAMING) {
-            M4OSA_UInt32 index;
-            mCurrentVideoEffect &= ~VIDEO_EFFECT_FRAMING; //never apply framing here.
-
-            // Find the effect in effectSettings array
-            for (index = 0; index < mNumberEffects; index++) {
-                if(mEffectsSettings[index].VideoEffectType ==
-                    (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
-
-                    if((mEffectsSettings[index].uiStartTime <= pFrameInfo->timeMs) &&
-                        ((mEffectsSettings[index].uiStartTime+
-                        mEffectsSettings[index].uiDuration) >= pFrameInfo->timeMs))
-                    {
-                        break;
-                    }
-                }
-            }
-            if ((index < mNumberEffects) && (pCurrEditInfo != NULL)) {
-                pCurrEditInfo->overlaySettingsIndex = index;
-                ALOGV("Framing index = %d", index);
-            } else {
-                ALOGV("No framing effects found");
-            }
-        }
-
-        if(mCurrentVideoEffect != VIDEO_EFFECT_NONE) {
-            err = applyVideoEffect((M4OSA_Void *)pFrameStr->pBuffer,
-             OMX_COLOR_FormatYUV420Planar, pFrameStr->uiFrameWidth,
-             pFrameStr->uiFrameHeight, pFrameStr->timeMs,
-             (M4OSA_Void *)pixelArray);
-
-            if(err != M4NO_ERROR) {
-                ALOGE("renderPreviewFrame: applyVideoEffect error 0x%x", (unsigned int)err);
-                delete mTarget;
-                mTarget = NULL;
-                free(pixelArray);
-                pixelArray = NULL;
-                return err;
-           }
-           mCurrentVideoEffect = VIDEO_EFFECT_NONE;
-        }
-        else {
-            // Apply the rendering mode
-            err = doImageRenderingMode((M4OSA_Void *)pFrameStr->pBuffer,
-             OMX_COLOR_FormatYUV420Planar, pFrameStr->uiFrameWidth,
-             pFrameStr->uiFrameHeight, (M4OSA_Void *)pixelArray);
-
-            if(err != M4NO_ERROR) {
-                ALOGE("renderPreviewFrame:doImageRenderingMode error 0x%x", (unsigned int)err);
-                delete mTarget;
-                mTarget = NULL;
-                free(pixelArray);
-                pixelArray = NULL;
-                return err;
-            }
-        }
-    }
-    else {
-        // Apply the rendering mode
-        err = doImageRenderingMode((M4OSA_Void *)pFrameStr->pBuffer,
-         OMX_COLOR_FormatYUV420Planar, pFrameStr->uiFrameWidth,
-         pFrameStr->uiFrameHeight, (M4OSA_Void *)pixelArray);
-
-        if(err != M4NO_ERROR) {
-            ALOGE("renderPreviewFrame: doImageRenderingMode error 0x%x", (unsigned int)err);
-            delete mTarget;
-            mTarget = NULL;
-            free(pixelArray);
-            pixelArray = NULL;
-            return err;
-        }
-    }
-
-    mTarget->renderYV12();
-    return err;
-}
-
-M4OSA_Void VideoEditorPreviewController::setJniCallback(void* cookie,
-    jni_progress_callback_fct callbackFct) {
-    //ALOGV("setJniCallback");
-    mJniCookie = cookie;
-    mJniCallback = callbackFct;
-}
-
-M4OSA_ERR VideoEditorPreviewController::preparePlayer(
-    void* param, int playerInstance, int index) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorPreviewController *pController =
-     (VideoEditorPreviewController *)param;
-
-    ALOGV("preparePlayer: instance %d file %d", playerInstance, index);
-
-    const char* fileName = (const char*) pController->mClipList[index]->pFile;
-    pController->mVePlayer[playerInstance]->setDataSource(fileName, NULL);
-
-    ALOGV("preparePlayer: setDataSource instance %s",
-     (const char *)pController->mClipList[index]->pFile);
-
-    pController->mVePlayer[playerInstance]->setVideoSurface(
-     pController->mSurface);
-    ALOGV("preparePlayer: setVideoSurface");
-
-    pController->mVePlayer[playerInstance]->setMediaRenderingMode(
-     pController->mClipList[index]->xVSS.MediaRendering,
-     pController->mOutputVideoSize);
-    ALOGV("preparePlayer: setMediaRenderingMode");
-
-    if((M4OSA_UInt32)index == pController->mStartingClipIndex) {
-        pController->mVePlayer[playerInstance]->setPlaybackBeginTime(
-        pController->mFirstPreviewClipBeginTime);
-    }
-    else {
-        pController->mVePlayer[playerInstance]->setPlaybackBeginTime(
-        pController->mClipList[index]->uiBeginCutTime);
-    }
-    ALOGV("preparePlayer: setPlaybackBeginTime(%d)",
-     pController->mClipList[index]->uiBeginCutTime);
-
-    pController->mVePlayer[playerInstance]->setPlaybackEndTime(
-     pController->mClipList[index]->uiEndCutTime);
-    ALOGV("preparePlayer: setPlaybackEndTime(%d)",
-     pController->mClipList[index]->uiEndCutTime);
-
-    if(pController->mClipList[index]->FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
-        pController->mVePlayer[playerInstance]->setImageClipProperties(
-                 pController->mClipList[index]->ClipProperties.uiVideoWidth,
-                 pController->mClipList[index]->ClipProperties.uiVideoHeight);
-        ALOGV("preparePlayer: setImageClipProperties");
-    }
-
-    pController->mVePlayer[playerInstance]->prepare();
-    ALOGV("preparePlayer: prepared");
-
-    if(pController->mClipList[index]->uiBeginCutTime > 0) {
-        pController->mVePlayer[playerInstance]->seekTo(
-         pController->mClipList[index]->uiBeginCutTime);
-
-        ALOGV("preparePlayer: seekTo(%d)",
-         pController->mClipList[index]->uiBeginCutTime);
-    }
-    pController->mVePlayer[pController->mCurrentPlayer]->setAudioPlayer(pController->mVEAudioPlayer);
-
-    pController->mVePlayer[playerInstance]->readFirstVideoFrame();
-    ALOGV("preparePlayer: readFirstVideoFrame of clip");
-
-    return err;
-}
-
-M4OSA_ERR VideoEditorPreviewController::threadProc(M4OSA_Void* param) {
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_Int32 index = 0;
-    VideoEditorPreviewController *pController =
-     (VideoEditorPreviewController *)param;
-
-    ALOGV("inside threadProc");
-    if(pController->mPlayerState == VePlayerIdle) {
-        (pController->mCurrentClipNumber)++;
-
-        ALOGD("threadProc: playing file index %d total clips %d",
-         pController->mCurrentClipNumber, pController->mNumberClipsToPreview);
-
-        if((M4OSA_UInt32)pController->mCurrentClipNumber >=
-         pController->mNumberClipsToPreview) {
-
-            ALOGD("All clips previewed");
-
-            pController->mCurrentPlayedDuration = 0;
-            pController->mCurrentClipDuration = 0;
-            pController->mCurrentPlayer = 0;
-
-            if(pController->mPreviewLooping == M4OSA_TRUE) {
-                pController->mCurrentClipNumber =
-                 pController->mStartingClipIndex;
-
-                ALOGD("Preview looping TRUE, restarting from clip index %d",
-                 pController->mCurrentClipNumber);
-
-                // Reset the story board timestamp inside the player
-                for (int playerInst=0; playerInst<kTotalNumPlayerInstances;
-                 playerInst++) {
-                    pController->mVePlayer[playerInst]->resetJniCallbackTimeStamp();
-                }
-            }
-            else {
-                M4OSA_UInt32 endArgs = 0;
-                if(pController->mJniCallback != NULL) {
-                    pController->mJniCallback(
-                     pController->mJniCookie, MSG_TYPE_PREVIEW_END, &endArgs);
-                }
-                pController->mPlayerState = VePlayerAutoStop;
-
-                // Reset original begin cuttime of first previewed clip
-                pController->mClipList[pController->mStartingClipIndex]->uiBeginCutTime =
-                 pController->mFirstPreviewClipBeginTime;
-                // Reset original end cuttime of last previewed clip
-                pController->mClipList[pController->mNumberClipsToPreview-1]->uiEndCutTime =
-                 pController->mLastPreviewClipEndTime;
-
-                // Return a warning to M4OSA thread handler
-                // so that thread is moved from executing state to open state
-                return M4WAR_NO_MORE_STREAM;
-            }
-        }
-
-        index=pController->mCurrentClipNumber;
-        if((M4OSA_UInt32)pController->mCurrentClipNumber == pController->mStartingClipIndex) {
-            pController->mCurrentPlayedDuration +=
-             pController->mVideoStoryBoardTimeMsUptoFirstPreviewClip;
-
-            pController->mCurrentClipDuration =
-             pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime
-              - pController->mFirstPreviewClipBeginTime;
-
-            preparePlayer((void*)pController, pController->mCurrentPlayer, index);
-        }
-        else {
-            pController->mCurrentPlayedDuration +=
-             pController->mCurrentClipDuration;
-
-            pController->mCurrentClipDuration =
-             pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime -
-             pController->mClipList[pController->mCurrentClipNumber]->uiBeginCutTime;
-        }
-
-        pController->mVePlayer[pController->mCurrentPlayer]->setStoryboardStartTime(
-         pController->mCurrentPlayedDuration);
-        ALOGV("threadProc: setStoryboardStartTime");
-
-        // Set the next clip duration for Audio mix here
-        if((M4OSA_UInt32)pController->mCurrentClipNumber != pController->mStartingClipIndex) {
-
-            pController->mVePlayer[pController->mCurrentPlayer]->setAudioMixStoryBoardParam(
-             pController->mCurrentPlayedDuration,
-             pController->mClipList[index]->uiBeginCutTime,
-             pController->mClipList[index]->ClipProperties.uiClipAudioVolumePercentage);
-
-            ALOGV("threadProc: setAudioMixStoryBoardParam fromMS %d \
-             ClipBeginTime %d", pController->mCurrentPlayedDuration +
-             pController->mClipList[index]->uiBeginCutTime,
-             pController->mClipList[index]->uiBeginCutTime,
-             pController->mClipList[index]->ClipProperties.uiClipAudioVolumePercentage);
-        }
-        // Capture the active player being used
-        pController->mActivePlayerIndex = pController->mCurrentPlayer;
-
-        pController->mVePlayer[pController->mCurrentPlayer]->start();
-        ALOGV("threadProc: started");
-
-        pController->mPlayerState = VePlayerBusy;
-
-    } else if(pController->mPlayerState == VePlayerAutoStop) {
-        ALOGV("Preview completed..auto stop the player");
-    } else if ((pController->mPlayerState == VePlayerBusy) && (pController->mPrepareReqest)) {
-        // Prepare the player here
-        pController->mPrepareReqest = M4OSA_FALSE;
-        preparePlayer((void*)pController, pController->mCurrentPlayer,
-            pController->mCurrentClipNumber+1);
-        if (pController->mSemThreadWait != NULL) {
-            err = M4OSA_semaphoreWait(pController->mSemThreadWait,
-                M4OSA_WAIT_FOREVER);
-        }
-    } else {
-        if (!pController->bStopThreadInProgress) {
-            ALOGV("threadProc: state busy...wait for sem");
-            if (pController->mSemThreadWait != NULL) {
-                err = M4OSA_semaphoreWait(pController->mSemThreadWait,
-                 M4OSA_WAIT_FOREVER);
-             }
-        }
-        ALOGV("threadProc: sem wait returned err = 0x%x", err);
-    }
-
-    //Always return M4NO_ERROR to ensure the thread keeps running
-    return M4NO_ERROR;
-}
-
-void VideoEditorPreviewController::notify(
-    void* cookie, int msg, int ext1, int ext2)
-{
-    VideoEditorPreviewController *pController =
-     (VideoEditorPreviewController *)cookie;
-
-    M4OSA_ERR err = M4NO_ERROR;
-    uint32_t clipDuration = 0;
-    switch (msg) {
-        case MEDIA_NOP: // interface test message
-            ALOGV("MEDIA_NOP");
-            break;
-        case MEDIA_PREPARED:
-            ALOGV("MEDIA_PREPARED");
-            break;
-        case MEDIA_PLAYBACK_COMPLETE:
-        {
-            ALOGD("notify:MEDIA_PLAYBACK_COMPLETE, mCurrentClipNumber = %d",
-                    pController->mCurrentClipNumber);
-            pController->mPlayerState = VePlayerIdle;
-
-            //send progress callback with last frame timestamp
-            if((M4OSA_UInt32)pController->mCurrentClipNumber ==
-             pController->mStartingClipIndex) {
-                clipDuration =
-                 pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime
-                  - pController->mFirstPreviewClipBeginTime;
-            }
-            else {
-                clipDuration =
-                 pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime
-                  - pController->mClipList[pController->mCurrentClipNumber]->uiBeginCutTime;
-            }
-
-            M4OSA_UInt32 playedDuration = clipDuration+pController->mCurrentPlayedDuration;
-            pController->mJniCallback(
-                 pController->mJniCookie, MSG_TYPE_PROGRESS_INDICATION,
-                 &playedDuration);
-
-            if ((pController->mOverlayState == OVERLAY_UPDATE) &&
-                ((M4OSA_UInt32)pController->mCurrentClipNumber !=
-                (pController->mNumberClipsToPreview-1))) {
-                VideoEditorCurretEditInfo *pEditInfo =
-                    (VideoEditorCurretEditInfo*)M4OSA_32bitAlignedMalloc(sizeof(VideoEditorCurretEditInfo),
-                    M4VS, (M4OSA_Char*)"Current Edit info");
-                pEditInfo->overlaySettingsIndex = ext2;
-                pEditInfo->clipIndex = pController->mCurrentClipNumber;
-                pController->mOverlayState == OVERLAY_CLEAR;
-                if (pController->mJniCallback != NULL) {
-                        pController->mJniCallback(pController->mJniCookie,
-                            MSG_TYPE_OVERLAY_CLEAR, pEditInfo);
-                }
-                free(pEditInfo);
-            }
-            {
-                Mutex::Autolock autoLock(pController->mLockSem);
-                if (pController->mSemThreadWait != NULL) {
-                    M4OSA_semaphorePost(pController->mSemThreadWait);
-                    return;
-                }
-            }
-
-            break;
-        }
-        case MEDIA_ERROR:
-        {
-            int err_val = ext1;
-          // Always log errors.
-          // ext1: Media framework error code.
-          // ext2: Implementation dependant error code.
-            ALOGE("MEDIA_ERROR; error (%d, %d)", ext1, ext2);
-            if(pController->mJniCallback != NULL) {
-                pController->mJniCallback(pController->mJniCookie,
-                 MSG_TYPE_PLAYER_ERROR, &err_val);
-            }
-            break;
-        }
-        case MEDIA_INFO:
-        {
-            int info_val = ext2;
-            // ext1: Media framework error code.
-            // ext2: Implementation dependant error code.
-            //ALOGW("MEDIA_INFO; info/warning (%d, %d)", ext1, ext2);
-            if(pController->mJniCallback != NULL) {
-                pController->mJniCallback(pController->mJniCookie,
-                 MSG_TYPE_PROGRESS_INDICATION, &info_val);
-            }
-            break;
-        }
-        case MEDIA_SEEK_COMPLETE:
-            ALOGV("MEDIA_SEEK_COMPLETE; Received seek complete");
-            break;
-        case MEDIA_BUFFERING_UPDATE:
-            ALOGV("MEDIA_BUFFERING_UPDATE; buffering %d", ext1);
-            break;
-        case MEDIA_SET_VIDEO_SIZE:
-            ALOGV("MEDIA_SET_VIDEO_SIZE; New video size %d x %d", ext1, ext2);
-            break;
-        case static_cast<int>(0xAAAAAAAA):
-            ALOGV("VIDEO PLAYBACK ALMOST over, prepare next player");
-            // Select next player and prepare it
-            // If there is a clip after this one
-            if ((M4OSA_UInt32)(pController->mCurrentClipNumber+1) <
-             pController->mNumberClipsToPreview) {
-                pController->mPrepareReqest = M4OSA_TRUE;
-                pController->mCurrentPlayer++;
-                if (pController->mCurrentPlayer >= kTotalNumPlayerInstances) {
-                    pController->mCurrentPlayer = 0;
-                }
-                // Prepare the first clip to be played
-                {
-                    Mutex::Autolock autoLock(pController->mLockSem);
-                    if (pController->mSemThreadWait != NULL) {
-                        M4OSA_semaphorePost(pController->mSemThreadWait);
-                    }
-                }
-            }
-            break;
-        case static_cast<int>(0xBBBBBBBB):
-        {
-            ALOGV("VIDEO PLAYBACK, Update Overlay");
-            int overlayIndex = ext2;
-            VideoEditorCurretEditInfo *pEditInfo =
-                    (VideoEditorCurretEditInfo*)M4OSA_32bitAlignedMalloc(sizeof(VideoEditorCurretEditInfo),
-                    M4VS, (M4OSA_Char*)"Current Edit info");
-            //ext1 = 1; start the overlay display
-            //     = 2; Clear the overlay.
-            pEditInfo->overlaySettingsIndex = ext2;
-            pEditInfo->clipIndex = pController->mCurrentClipNumber;
-            ALOGV("pController->mCurrentClipNumber = %d",pController->mCurrentClipNumber);
-            if (pController->mJniCallback != NULL) {
-                if (ext1 == 1) {
-                    pController->mOverlayState = OVERLAY_UPDATE;
-                    pController->mJniCallback(pController->mJniCookie,
-                        MSG_TYPE_OVERLAY_UPDATE, pEditInfo);
-                } else {
-                    pController->mOverlayState = OVERLAY_CLEAR;
-                    pController->mJniCallback(pController->mJniCookie,
-                        MSG_TYPE_OVERLAY_CLEAR, pEditInfo);
-                }
-            }
-            free(pEditInfo);
-            break;
-        }
-        default:
-            ALOGV("unrecognized message: (%d, %d, %d)", msg, ext1, ext2);
-            break;
-    }
-}
-
-void VideoEditorPreviewController::setVideoEffectType(
-    M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable) {
-
-    M4OSA_UInt32 effect = VIDEO_EFFECT_NONE;
-
-    // map M4VSS3GPP_VideoEffectType to local enum
-    switch(type) {
-        case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
-            effect = VIDEO_EFFECT_FADEFROMBLACK;
-            break;
-
-        case M4VSS3GPP_kVideoEffectType_FadeToBlack:
-            effect = VIDEO_EFFECT_FADETOBLACK;
-            break;
-
-        case M4xVSS_kVideoEffectType_BlackAndWhite:
-            effect = VIDEO_EFFECT_BLACKANDWHITE;
-            break;
-
-        case M4xVSS_kVideoEffectType_Pink:
-            effect = VIDEO_EFFECT_PINK;
-            break;
-
-        case M4xVSS_kVideoEffectType_Green:
-            effect = VIDEO_EFFECT_GREEN;
-            break;
-
-        case M4xVSS_kVideoEffectType_Sepia:
-            effect = VIDEO_EFFECT_SEPIA;
-            break;
-
-        case M4xVSS_kVideoEffectType_Negative:
-            effect = VIDEO_EFFECT_NEGATIVE;
-            break;
-
-        case M4xVSS_kVideoEffectType_Framing:
-            effect = VIDEO_EFFECT_FRAMING;
-            break;
-
-        case M4xVSS_kVideoEffectType_Fifties:
-            effect = VIDEO_EFFECT_FIFTIES;
-            break;
-
-        case M4xVSS_kVideoEffectType_ColorRGB16:
-            effect = VIDEO_EFFECT_COLOR_RGB16;
-            break;
-
-        case M4xVSS_kVideoEffectType_Gradient:
-            effect = VIDEO_EFFECT_GRADIENT;
-            break;
-
-        default:
-            effect = VIDEO_EFFECT_NONE;
-            break;
-    }
-
-    if(enable == M4OSA_TRUE) {
-        // If already set, then no need to set again
-        if(!(mCurrentVideoEffect & effect))
-            mCurrentVideoEffect |= effect;
-            if(effect == VIDEO_EFFECT_FIFTIES) {
-                mIsFiftiesEffectStarted = true;
-            }
-    }
-    else  {
-        // Reset only if already set
-        if(mCurrentVideoEffect & effect)
-            mCurrentVideoEffect &= ~effect;
-    }
-
-    return;
-}
-
-
-M4OSA_ERR VideoEditorPreviewController::applyVideoEffect(
-    M4OSA_Void * dataPtr, M4OSA_UInt32 colorFormat, M4OSA_UInt32 videoWidth,
-    M4OSA_UInt32 videoHeight, M4OSA_UInt32 timeMs, M4OSA_Void* outPtr) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    vePostProcessParams postProcessParams;
-
-    postProcessParams.vidBuffer = (M4VIFI_UInt8*)dataPtr;
-    postProcessParams.videoWidth = videoWidth;
-    postProcessParams.videoHeight = videoHeight;
-    postProcessParams.timeMs = timeMs;
-    postProcessParams.timeOffset = 0; //Since timeMS already takes care of offset in this case
-    postProcessParams.effectsSettings = mEffectsSettings;
-    postProcessParams.numberEffects = mNumberEffects;
-    postProcessParams.outVideoWidth = mOutputVideoWidth;
-    postProcessParams.outVideoHeight = mOutputVideoHeight;
-    postProcessParams.currentVideoEffect = mCurrentVideoEffect;
-    postProcessParams.renderingMode = mRenderingMode;
-    if(mIsFiftiesEffectStarted == M4OSA_TRUE) {
-        postProcessParams.isFiftiesEffectStarted = M4OSA_TRUE;
-        mIsFiftiesEffectStarted = M4OSA_FALSE;
-    }
-    else {
-       postProcessParams.isFiftiesEffectStarted = M4OSA_FALSE;
-    }
-    //postProcessParams.renderer = mTarget;
-    postProcessParams.overlayFrameRGBBuffer = NULL;
-    postProcessParams.overlayFrameYUVBuffer = NULL;
-
-    mTarget->getBufferYV12(&(postProcessParams.pOutBuffer), &(postProcessParams.outBufferStride));
-
-    err = applyEffectsAndRenderingMode(&postProcessParams, videoWidth, videoHeight);
-    return err;
-}
-
-status_t VideoEditorPreviewController::setPreviewFrameRenderingMode(
-    M4xVSS_MediaRendering mode, M4VIDEOEDITING_VideoFrameSize outputVideoSize) {
-
-    ALOGV("setMediaRenderingMode: outputVideoSize = %d", outputVideoSize);
-    mRenderingMode = mode;
-
-    status_t err = OK;
-    /* get the video width and height by resolution */
-    err = getVideoSizeByResolution(outputVideoSize,
-              &mOutputVideoWidth, &mOutputVideoHeight);
-
-    return err;
-}
-
-M4OSA_ERR VideoEditorPreviewController::doImageRenderingMode(
-    M4OSA_Void * dataPtr, M4OSA_UInt32 colorFormat, M4OSA_UInt32 videoWidth,
-    M4OSA_UInt32 videoHeight, M4OSA_Void* outPtr) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4VIFI_ImagePlane planeIn[3], planeOut[3];
-    M4VIFI_UInt8 *inBuffer = M4OSA_NULL;
-    M4OSA_UInt32 outputBufferWidth =0, outputBufferHeight=0;
-
-    //frameSize = (videoWidth*videoHeight*3) >> 1;
-    inBuffer = (M4OSA_UInt8 *)dataPtr;
-
-    // In plane
-    prepareYUV420ImagePlane(planeIn, videoWidth,
-      videoHeight, (M4VIFI_UInt8 *)inBuffer, videoWidth, videoHeight);
-
-    outputBufferWidth = mOutputVideoWidth;
-    outputBufferHeight = mOutputVideoHeight;
-
-    // Out plane
-    uint8_t* outBuffer;
-    size_t outBufferStride = 0;
-
-    ALOGV("doMediaRendering CALL getBuffer()");
-    mTarget->getBufferYV12(&outBuffer, &outBufferStride);
-
-    // Set the output YUV420 plane to be compatible with YV12 format
-    //In YV12 format, sizes must be even
-    M4OSA_UInt32 yv12PlaneWidth = ((mOutputVideoWidth +1)>>1)<<1;
-    M4OSA_UInt32 yv12PlaneHeight = ((mOutputVideoHeight+1)>>1)<<1;
-
-    prepareYV12ImagePlane(planeOut, yv12PlaneWidth, yv12PlaneHeight,
-     (M4OSA_UInt32)outBufferStride, (M4VIFI_UInt8 *)outBuffer);
-
-    err = applyRenderingMode(planeIn, planeOut, mRenderingMode);
-    if(err != M4NO_ERROR) {
-        ALOGE("doImageRenderingMode: applyRenderingMode returned err=0x%x", (unsigned int)err);
-    }
-    return err;
-}
-
-} //namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorPreviewController.h b/libvideoeditor/lvpp/VideoEditorPreviewController.h
deleted file mode 100755
index 1756f32..0000000
--- a/libvideoeditor/lvpp/VideoEditorPreviewController.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_VE_PREVIEWCONTROLLER_H
-#define ANDROID_VE_PREVIEWCONTROLLER_H
-
-#include "VideoEditorPlayer.h"
-#include "VideoEditorTools.h"
-
-namespace android {
-
-// Callback mechanism from PreviewController to Jni  */
-typedef void (*jni_progress_callback_fct)(void* cookie, M4OSA_UInt32 msgType, void *argc);
-
-struct Surface;
-struct PreviewRenderer;
-
-class VideoEditorPreviewController {
-
-public:
-    VideoEditorPreviewController();
-    ~VideoEditorPreviewController();
-
-    M4OSA_ERR loadEditSettings(
-            M4VSS3GPP_EditSettings* pSettings,
-            M4xVSS_AudioMixingSettings* bgmSettings);
-
-    M4OSA_ERR setSurface(const sp<Surface>& surface);
-
-    M4OSA_ERR startPreview(
-            M4OSA_UInt32 fromMS, M4OSA_Int32 toMs,
-            M4OSA_UInt16 callBackAfterFrameCount,
-            M4OSA_Bool loop) ;
-
-    M4OSA_UInt32 stopPreview();
-
-    M4OSA_ERR renderPreviewFrame(
-            const sp<Surface>& surface,
-            VideoEditor_renderPreviewFrameStr* pFrameInfo,
-            VideoEditorCurretEditInfo *pCurrEditInfo);
-
-    M4OSA_ERR clearSurface(
-            const sp<Surface>& surface,
-            VideoEditor_renderPreviewFrameStr* pFrameInfo);
-
-    M4OSA_Void setJniCallback(
-            void* cookie,
-            jni_progress_callback_fct callbackFct);
-
-    status_t setPreviewFrameRenderingMode(
-            M4xVSS_MediaRendering mode,
-            M4VIDEOEDITING_VideoFrameSize outputVideoSize);
-
-private:
-    enum {
-        kTotalNumPlayerInstances = 2,
-        kPreviewThreadStackSize = 65536,
-    };
-
-    typedef enum {
-        VePlayerIdle = 0,
-        VePlayerBusy,
-        VePlayerAutoStop
-    } PlayerState;
-
-    typedef enum {
-        OVERLAY_UPDATE = 0,
-        OVERLAY_CLEAR
-    } OverlayState;
-
-    sp<VideoEditorPlayer> mVePlayer[kTotalNumPlayerInstances];
-    int mCurrentPlayer;  // player instance currently being used
-    sp<Surface>  mSurface;
-    mutable Mutex mLock;
-    M4OSA_Context mThreadContext;
-    PlayerState mPlayerState;
-    M4OSA_Bool    mPrepareReqest;
-    M4VSS3GPP_ClipSettings **mClipList;
-    M4OSA_UInt32 mNumberClipsInStoryBoard;
-    M4OSA_UInt32 mNumberClipsToPreview;
-    M4OSA_UInt32 mStartingClipIndex;
-    M4OSA_Bool mPreviewLooping;
-    M4OSA_UInt32 mCallBackAfterFrameCnt;
-    M4VSS3GPP_EffectSettings* mEffectsSettings;
-    M4OSA_UInt32 mNumberEffects;
-    M4OSA_Int32 mCurrentClipNumber;
-    M4OSA_UInt32 mClipTotalDuration;
-    M4OSA_UInt32 mCurrentVideoEffect;
-    M4xVSS_AudioMixingSettings* mBackgroundAudioSetting;
-    M4OSA_Context mAudioMixPCMFileHandle;
-    PreviewRenderer *mTarget;
-    M4OSA_Context mJniCookie;
-    jni_progress_callback_fct mJniCallback;
-    VideoEditor_renderPreviewFrameStr mFrameStr;
-    M4OSA_UInt32 mCurrentPlayedDuration;
-    M4OSA_UInt32 mCurrentClipDuration;
-    M4VIDEOEDITING_VideoFrameSize mOutputVideoSize;
-    M4OSA_UInt32 mFirstPreviewClipBeginTime;
-    M4OSA_UInt32 mLastPreviewClipEndTime;
-    M4OSA_UInt32 mVideoStoryBoardTimeMsUptoFirstPreviewClip;
-    OverlayState mOverlayState;
-    int mActivePlayerIndex;
-
-    M4xVSS_MediaRendering mRenderingMode;
-    uint32_t mOutputVideoWidth;
-    uint32_t mOutputVideoHeight;
-    bool bStopThreadInProgress;
-    M4OSA_Context mSemThreadWait;
-    bool mIsFiftiesEffectStarted;
-
-    sp<VideoEditorPlayer::VeAudioOutput> mVEAudioSink;
-    VideoEditorAudioPlayer *mVEAudioPlayer;
-    NativeWindowRenderer* mNativeWindowRenderer;
-
-    M4VIFI_UInt8*  mFrameRGBBuffer;
-    M4VIFI_UInt8*  mFrameYUVBuffer;
-    mutable Mutex mLockSem;
-
-
-    static M4OSA_ERR preparePlayer(void* param, int playerInstance, int index);
-    static M4OSA_ERR threadProc(M4OSA_Void* param);
-    static void notify(void* cookie, int msg, int ext1, int ext2);
-
-    void setVideoEffectType(M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable);
-
-    M4OSA_ERR applyVideoEffect(
-            M4OSA_Void * dataPtr, M4OSA_UInt32 colorFormat,
-            M4OSA_UInt32 videoWidth, M4OSA_UInt32 videoHeight,
-            M4OSA_UInt32 timeMs, M4OSA_Void* outPtr);
-
-    M4OSA_ERR doImageRenderingMode(
-            M4OSA_Void * dataPtr,
-            M4OSA_UInt32 colorFormat, M4OSA_UInt32 videoWidth,
-            M4OSA_UInt32 videoHeight, M4OSA_Void* outPtr);
-
-    // Don't call me!
-    VideoEditorPreviewController(const VideoEditorPreviewController &);
-    VideoEditorPreviewController &operator=(
-            const VideoEditorPreviewController &);
-};
-
-}
-
-#endif // ANDROID_VE_PREVIEWCONTROLLER_H
diff --git a/libvideoeditor/lvpp/VideoEditorSRC.cpp b/libvideoeditor/lvpp/VideoEditorSRC.cpp
deleted file mode 100755
index 6beabfa..0000000
--- a/libvideoeditor/lvpp/VideoEditorSRC.cpp
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "VideoEditorSRC"
-
-#include <stdlib.h>
-#include <utils/Log.h>
-#include <audio_utils/primitives.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include "VideoEditorSRC.h"
-
-
-namespace android {
-
-VideoEditorSRC::VideoEditorSRC(const sp<MediaSource> &source) {
-    ALOGV("VideoEditorSRC %p(%p)", this, source.get());
-    static const int32_t kDefaultSamplingFreqencyHz = kFreq32000Hz;
-    mSource = source;
-    mResampler = NULL;
-    mChannelCnt = 0;
-    mSampleRate = 0;
-    mOutputSampleRate = kDefaultSamplingFreqencyHz;
-    mStarted = false;
-    mInitialTimeStampUs = -1;
-    mAccuOutBufferSize  = 0;
-    mSeekTimeUs = -1;
-    mBuffer = NULL;
-    mLeftover = 0;
-    mFormatChanged = false;
-    mStopPending = false;
-    mSeekMode = ReadOptions::SEEK_PREVIOUS_SYNC;
-
-    // Input Source validation
-    sp<MetaData> format = mSource->getFormat();
-    const char *mime;
-    CHECK(format->findCString(kKeyMIMEType, &mime));
-    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
-
-    // Set the metadata of the output after resampling.
-    mOutputFormat = new MetaData;
-    mOutputFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
-    mOutputFormat->setInt32(kKeySampleRate, kDefaultSamplingFreqencyHz);
-    mOutputFormat->setInt32(kKeyChannelCount, 2);  // always stereo
-}
-
-VideoEditorSRC::~VideoEditorSRC() {
-    ALOGV("~VideoEditorSRC %p(%p)", this, mSource.get());
-    stop();
-}
-
-status_t VideoEditorSRC::start(MetaData *params) {
-    ALOGV("start %p(%p)", this, mSource.get());
-    CHECK(!mStarted);
-
-    // Set resampler if required
-    checkAndSetResampler();
-
-    mSeekTimeUs = -1;
-    mSeekMode = ReadOptions::SEEK_PREVIOUS_SYNC;
-    mStarted = true;
-    mSource->start();
-
-    return OK;
-}
-
-status_t VideoEditorSRC::stop() {
-    ALOGV("stop %p(%p)", this, mSource.get());
-    if (!mStarted) {
-        return OK;
-    }
-
-    if (mBuffer) {
-        mBuffer->release();
-        mBuffer = NULL;
-    }
-    mSource->stop();
-    if (mResampler != NULL) {
-        delete mResampler;
-        mResampler = NULL;
-    }
-
-    mStarted = false;
-    mInitialTimeStampUs = -1;
-    mAccuOutBufferSize = 0;
-    mLeftover = 0;
-
-    return OK;
-}
-
-sp<MetaData> VideoEditorSRC::getFormat() {
-    ALOGV("getFormat");
-    return mOutputFormat;
-}
-
-status_t VideoEditorSRC::read(
-        MediaBuffer **buffer_out, const ReadOptions *options) {
-    ALOGV("read %p(%p)", this, mSource.get());
-    *buffer_out = NULL;
-
-    if (!mStarted) {
-        return ERROR_END_OF_STREAM;
-    }
-
-    if (mResampler) {
-        // Store the seek parameters
-        int64_t seekTimeUs;
-        ReadOptions::SeekMode mode = ReadOptions::SEEK_PREVIOUS_SYNC;
-        if (options && options->getSeekTo(&seekTimeUs, &mode)) {
-            ALOGV("read Seek %lld", seekTimeUs);
-            mSeekTimeUs = seekTimeUs;
-            mSeekMode = mode;
-        }
-
-        // We ask for 1024 frames in output
-        // resampler output is always 2 channels and 32 bits
-        const size_t kOutputFrameCount = 1024;
-        const size_t kBytes = kOutputFrameCount * 2 * sizeof(int32_t);
-        int32_t *pTmpBuffer = (int32_t *)calloc(1, kBytes);
-        if (!pTmpBuffer) {
-            ALOGE("calloc failed to allocate memory: %d bytes", kBytes);
-            return NO_MEMORY;
-        }
-
-        // Resample to target quality
-        mResampler->resample(pTmpBuffer, kOutputFrameCount, this);
-
-        if (mStopPending) {
-            stop();
-            mStopPending = false;
-        }
-
-        // Change resampler and retry if format change happened
-        if (mFormatChanged) {
-            mFormatChanged = false;
-            checkAndSetResampler();
-            free(pTmpBuffer);
-            return read(buffer_out, NULL);
-        }
-
-        // Create a new MediaBuffer
-        int32_t outBufferSize = kOutputFrameCount * 2 * sizeof(int16_t);
-        MediaBuffer* outBuffer = new MediaBuffer(outBufferSize);
-
-        // Convert back to 2 channels and 16 bits
-        ditherAndClamp(
-                (int32_t *)((uint8_t*)outBuffer->data() + outBuffer->range_offset()),
-                pTmpBuffer, kOutputFrameCount);
-        free(pTmpBuffer);
-
-        // Compute and set the new timestamp
-        sp<MetaData> to = outBuffer->meta_data();
-        int64_t totalOutDurationUs = (mAccuOutBufferSize * 1000000) / (mOutputSampleRate * 2 * 2);
-        int64_t timeUs = mInitialTimeStampUs + totalOutDurationUs;
-        to->setInt64(kKeyTime, timeUs);
-
-        // update the accumulate size
-        mAccuOutBufferSize += outBufferSize;
-        *buffer_out = outBuffer;
-    } else {
-        // Resampling not required. Read and pass-through.
-        MediaBuffer *aBuffer;
-        status_t err = mSource->read(&aBuffer, options);
-        if (err != OK) {
-            ALOGV("read returns err = %d", err);
-        }
-
-        if (err == INFO_FORMAT_CHANGED) {
-            checkAndSetResampler();
-            return read(buffer_out, NULL);
-        }
-
-        // EOS or some other error
-        if(err != OK) {
-            stop();
-            *buffer_out = NULL;
-            return err;
-        }
-        *buffer_out = aBuffer;
-    }
-
-    return OK;
-}
-
-status_t VideoEditorSRC::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) {
-    ALOGV("getNextBuffer %d, chan = %d", pBuffer->frameCount, mChannelCnt);
-    uint32_t done = 0;
-    uint32_t want = pBuffer->frameCount * mChannelCnt * 2;
-    pBuffer->raw = malloc(want);
-
-    while (mStarted && want > 0) {
-        // If we don't have any data left, read a new buffer.
-        if (!mBuffer) {
-            // if we seek, reset the initial time stamp and accumulated time
-            ReadOptions options;
-            if (mSeekTimeUs >= 0) {
-                ALOGV("%p cacheMore_l Seek requested = %lld", this, mSeekTimeUs);
-                ReadOptions::SeekMode mode = mSeekMode;
-                options.setSeekTo(mSeekTimeUs, mode);
-                mSeekTimeUs = -1;
-                mInitialTimeStampUs = -1;
-                mAccuOutBufferSize = 0;
-            }
-
-            status_t err = mSource->read(&mBuffer, &options);
-
-            if (err != OK) {
-                free(pBuffer->raw);
-                pBuffer->raw = NULL;
-                pBuffer->frameCount = 0;
-            }
-
-            if (err == INFO_FORMAT_CHANGED) {
-                ALOGV("getNextBuffer: source read returned INFO_FORMAT_CHANGED");
-                // At this point we cannot switch to a new AudioResampler because
-                // we are in a callback called by the AudioResampler itself. So
-                // just remember the fact that the format has changed, and let
-                // read() handles this.
-                mFormatChanged = true;
-                return err;
-            }
-
-            // EOS or some other error
-            if (err != OK) {
-                ALOGV("EOS or some err: %d", err);
-                // We cannot call stop() here because stop() will release the
-                // AudioResampler, and we are in a callback of the AudioResampler.
-                // So just remember the fact and let read() call stop().
-                mStopPending = true;
-                return err;
-            }
-
-            CHECK(mBuffer);
-            mLeftover = mBuffer->range_length();
-            if (mInitialTimeStampUs == -1) {
-                int64_t curTS;
-                sp<MetaData> from = mBuffer->meta_data();
-                from->findInt64(kKeyTime, &curTS);
-                ALOGV("setting mInitialTimeStampUs to %lld", mInitialTimeStampUs);
-                mInitialTimeStampUs = curTS;
-            }
-        }
-
-        // Now copy data to the destination
-        uint32_t todo = mLeftover;
-        if (todo > want) {
-            todo = want;
-        }
-
-        uint8_t* end = (uint8_t*)mBuffer->data() + mBuffer->range_offset()
-                + mBuffer->range_length();
-        memcpy((uint8_t*)pBuffer->raw + done, end - mLeftover, todo);
-        done += todo;
-        want -= todo;
-        mLeftover -= todo;
-
-        // Release MediaBuffer as soon as possible.
-        if (mLeftover == 0) {
-            mBuffer->release();
-            mBuffer = NULL;
-        }
-    }
-
-    pBuffer->frameCount = done / (mChannelCnt * 2);
-    ALOGV("getNextBuffer done %d", pBuffer->frameCount);
-    return OK;
-}
-
-
-void VideoEditorSRC::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-    ALOGV("releaseBuffer: %p", pBuffer);
-    free(pBuffer->raw);
-    pBuffer->raw = NULL;
-    pBuffer->frameCount = 0;
-}
-
-void VideoEditorSRC::checkAndSetResampler() {
-    ALOGV("checkAndSetResampler");
-
-    static const uint16_t kUnityGain = 0x1000;
-    sp<MetaData> format = mSource->getFormat();
-    const char *mime;
-    CHECK(format->findCString(kKeyMIMEType, &mime));
-    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
-
-    CHECK(format->findInt32(kKeySampleRate, &mSampleRate));
-    CHECK(format->findInt32(kKeyChannelCount, &mChannelCnt));
-
-    // If a resampler exists, delete it first
-    if (mResampler != NULL) {
-        delete mResampler;
-        mResampler = NULL;
-    }
-
-    // Clear previous buffer
-    if (mBuffer) {
-        mBuffer->release();
-        mBuffer = NULL;
-    }
-
-    if (mSampleRate != mOutputSampleRate || mChannelCnt != 2) {
-        ALOGV("Resampling required (%d => %d Hz, # channels = %d)",
-            mSampleRate, mOutputSampleRate, mChannelCnt);
-
-        mResampler = AudioResampler::create(
-                        16 /* bit depth */,
-                        mChannelCnt,
-                        mOutputSampleRate);
-        CHECK(mResampler);
-        mResampler->setSampleRate(mSampleRate);
-        mResampler->setVolume(kUnityGain, kUnityGain);
-    } else {
-        ALOGV("Resampling not required (%d => %d Hz, # channels = %d)",
-            mSampleRate, mOutputSampleRate, mChannelCnt);
-    }
-}
-
-} //namespce android
diff --git a/libvideoeditor/lvpp/VideoEditorSRC.h b/libvideoeditor/lvpp/VideoEditorSRC.h
deleted file mode 100755
index 1707d4d..0000000
--- a/libvideoeditor/lvpp/VideoEditorSRC.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include <stdint.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/AudioBufferProvider.h>
-#include "AudioResampler.h"
-
-namespace android {
-
-struct MediaBuffer;
-
-class VideoEditorSRC : public MediaSource , public AudioBufferProvider {
-
-public:
-    VideoEditorSRC(const sp<MediaSource> &source);
-
-    virtual status_t start (MetaData *params = NULL);
-    virtual status_t stop();
-    virtual sp<MetaData> getFormat();
-    virtual status_t read (
-                MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-    virtual void releaseBuffer(Buffer* buffer);
-
-    // Sampling freqencies
-    enum {
-        kFreq8000Hz  = 8000,
-        kFreq11025Hz = 11025,
-        kFreq12000Hz = 12000,
-        kFreq16000Hz = 16000,
-        kFreq22050Hz = 22050,
-        kFreq24000Hz = 24000,
-        kFreq32000Hz = 32000,
-        kFreq44100Hz = 44100,
-        kFreq48000Hz = 48000,
-    };
-
-protected :
-    virtual ~VideoEditorSRC();
-
-private:
-    AudioResampler *mResampler;
-    sp<MediaSource> mSource;
-    int mChannelCnt;
-    int mSampleRate;
-    int32_t mOutputSampleRate;
-    bool mStarted;
-    sp<MetaData> mOutputFormat;
-
-    MediaBuffer* mBuffer;
-    int32_t mLeftover;
-    bool mFormatChanged;
-    bool mStopPending;
-
-    int64_t mInitialTimeStampUs;
-    int64_t mAccuOutBufferSize;
-
-    int64_t mSeekTimeUs;
-    ReadOptions::SeekMode mSeekMode;
-
-    VideoEditorSRC();
-    void checkAndSetResampler();
-
-    // Don't call me
-    VideoEditorSRC(const VideoEditorSRC&);
-    VideoEditorSRC &operator=(const VideoEditorSRC &);
-
-};
-
-} //namespce android
-
diff --git a/libvideoeditor/lvpp/VideoEditorTools.cpp b/libvideoeditor/lvpp/VideoEditorTools.cpp
deleted file mode 100755
index 2b9fd60..0000000
--- a/libvideoeditor/lvpp/VideoEditorTools.cpp
+++ /dev/null
@@ -1,3883 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "VideoEditorTools.h"
-#include "PreviewRenderer.h"
-/*+ Handle the image files here */
-#include <utils/Log.h>
-/*- Handle the image files here */
-
-const M4VIFI_UInt8   M4VIFI_ClipTable[1256]
-= {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
-0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
-0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
-0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
-0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
-0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
-0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
-0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43,
-0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
-0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
-0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b,
-0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
-0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
-0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
-0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
-0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
-0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b,
-0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,
-0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
-0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3,
-0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
-0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
-0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
-0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3,
-0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
-0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3,
-0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb,
-0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
-0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb,
-0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3,
-0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
-0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-};
-
-/* Division table for ( 65535/x ); x = 0 to 512 */
-const M4VIFI_UInt16  M4VIFI_DivTable[512]
-= {
-0, 65535, 32768, 21845, 16384, 13107, 10922, 9362,
-8192, 7281, 6553, 5957, 5461, 5041, 4681, 4369,
-4096, 3855, 3640, 3449, 3276, 3120, 2978, 2849,
-2730, 2621, 2520, 2427, 2340, 2259, 2184, 2114,
-2048, 1985, 1927, 1872, 1820, 1771, 1724, 1680,
-1638, 1598, 1560, 1524, 1489, 1456, 1424, 1394,
-1365, 1337, 1310, 1285, 1260, 1236, 1213, 1191,
-1170, 1149, 1129, 1110, 1092, 1074, 1057, 1040,
-1024, 1008, 992, 978, 963, 949, 936, 923,
-910, 897, 885, 873, 862, 851, 840, 829,
-819, 809, 799, 789, 780, 771, 762, 753,
-744, 736, 728, 720, 712, 704, 697, 689,
-682, 675, 668, 661, 655, 648, 642, 636,
-630, 624, 618, 612, 606, 601, 595, 590,
-585, 579, 574, 569, 564, 560, 555, 550,
-546, 541, 537, 532, 528, 524, 520, 516,
-512, 508, 504, 500, 496, 492, 489, 485,
-481, 478, 474, 471, 468, 464, 461, 458,
-455, 451, 448, 445, 442, 439, 436, 434,
-431, 428, 425, 422, 420, 417, 414, 412,
-409, 407, 404, 402, 399, 397, 394, 392,
-390, 387, 385, 383, 381, 378, 376, 374,
-372, 370, 368, 366, 364, 362, 360, 358,
-356, 354, 352, 350, 348, 346, 344, 343,
-341, 339, 337, 336, 334, 332, 330, 329,
-327, 326, 324, 322, 321, 319, 318, 316,
-315, 313, 312, 310, 309, 307, 306, 304,
-303, 302, 300, 299, 297, 296, 295, 293,
-292, 291, 289, 288, 287, 286, 284, 283,
-282, 281, 280, 278, 277, 276, 275, 274,
-273, 271, 270, 269, 268, 267, 266, 265,
-264, 263, 262, 261, 260, 259, 258, 257,
-256, 255, 254, 253, 252, 251, 250, 249,
-248, 247, 246, 245, 244, 243, 242, 241,
-240, 240, 239, 238, 237, 236, 235, 234,
-234, 233, 232, 231, 230, 229, 229, 228,
-227, 226, 225, 225, 224, 223, 222, 222,
-221, 220, 219, 219, 218, 217, 217, 216,
-215, 214, 214, 213, 212, 212, 211, 210,
-210, 209, 208, 208, 207, 206, 206, 205,
-204, 204, 203, 202, 202, 201, 201, 200,
-199, 199, 198, 197, 197, 196, 196, 195,
-195, 194, 193, 193, 192, 192, 191, 191,
-190, 189, 189, 188, 188, 187, 187, 186,
-186, 185, 185, 184, 184, 183, 183, 182,
-182, 181, 181, 180, 180, 179, 179, 178,
-178, 177, 177, 176, 176, 175, 175, 174,
-174, 173, 173, 172, 172, 172, 171, 171,
-170, 170, 169, 169, 168, 168, 168, 167,
-167, 166, 166, 165, 165, 165, 164, 164,
-163, 163, 163, 162, 162, 161, 161, 161,
-160, 160, 159, 159, 159, 158, 158, 157,
-157, 157, 156, 156, 156, 155, 155, 154,
-154, 154, 153, 153, 153, 152, 152, 152,
-151, 151, 151, 150, 150, 149, 149, 149,
-148, 148, 148, 147, 147, 147, 146, 146,
-146, 145, 145, 145, 144, 144, 144, 144,
-143, 143, 143, 142, 142, 142, 141, 141,
-141, 140, 140, 140, 140, 139, 139, 139,
-138, 138, 138, 137, 137, 137, 137, 136,
-136, 136, 135, 135, 135, 135, 134, 134,
-134, 134, 133, 133, 133, 132, 132, 132,
-132, 131, 131, 131, 131, 130, 130, 130,
-130, 129, 129, 129, 129, 128, 128, 128
-};
-
-const M4VIFI_Int32  const_storage1[8]
-= {
-0x00002568, 0x00003343,0x00000649,0x00000d0f, 0x0000D86C, 0x0000D83B, 0x00010000, 0x00010000
-};
-
-const M4VIFI_Int32  const_storage[8]
-= {
-0x00002568, 0x00003343, 0x1BF800, 0x00000649, 0x00000d0f, 0x110180, 0x40cf, 0x22BE00
-};
-
-
-const M4VIFI_UInt16  *M4VIFI_DivTable_zero
- = &M4VIFI_DivTable[0];
-
-const M4VIFI_UInt8   *M4VIFI_ClipTable_zero
- = &M4VIFI_ClipTable[500];
-
-M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data,
-    M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut ) {
-
-    M4VIFI_UInt32 i;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest, *p_buf_src_u, *p_buf_src_v;
-    M4VIFI_UInt8    return_code = M4VIFI_OK;
-
-    /* the filter is implemented with the assumption that the width is equal to stride */
-    if(PlaneIn[0].u_width != PlaneIn[0].u_stride)
-        return M4VIFI_INVALID_PARAM;
-
-    /* The input Y Plane is the same as the output Y Plane */
-    p_buf_src = &(PlaneIn[0].pac_data[PlaneIn[0].u_topleft]);
-    p_buf_dest = &(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]);
-    memcpy((void *)p_buf_dest,(void *)p_buf_src ,
-        PlaneOut[0].u_width * PlaneOut[0].u_height);
-
-    /* The U and V components are planar. The need to be made interleaved */
-    p_buf_src_u = &(PlaneIn[1].pac_data[PlaneIn[1].u_topleft]);
-    p_buf_src_v = &(PlaneIn[2].pac_data[PlaneIn[2].u_topleft]);
-    p_buf_dest  = &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
-
-    for(i = 0; i < PlaneOut[1].u_width*PlaneOut[1].u_height; i++)
-    {
-        *p_buf_dest++ = *p_buf_src_u++;
-        *p_buf_dest++ = *p_buf_src_v++;
-    }
-    return return_code;
-}
-
-M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data,
-    M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut ) {
-
-     M4VIFI_UInt32 i;
-     M4VIFI_UInt8 *p_buf_src, *p_buf_dest, *p_buf_src_u, *p_buf_src_v;
-     M4VIFI_UInt8 *p_buf_dest_u,*p_buf_dest_v,*p_buf_src_uv;
-     M4VIFI_UInt8     return_code = M4VIFI_OK;
-
-     /* the filter is implemented with the assumption that the width is equal to stride */
-     if(PlaneIn[0].u_width != PlaneIn[0].u_stride)
-        return M4VIFI_INVALID_PARAM;
-
-     /* The input Y Plane is the same as the output Y Plane */
-     p_buf_src = &(PlaneIn[0].pac_data[PlaneIn[0].u_topleft]);
-     p_buf_dest = &(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]);
-     memcpy((void *)p_buf_dest,(void *)p_buf_src ,
-         PlaneOut[0].u_width * PlaneOut[0].u_height);
-
-     /* The U and V components are planar. The need to be made interleaved */
-     p_buf_src_uv = &(PlaneIn[1].pac_data[PlaneIn[1].u_topleft]);
-     p_buf_dest_u  = &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
-     p_buf_dest_v  = &(PlaneOut[2].pac_data[PlaneOut[2].u_topleft]);
-
-     for(i = 0; i < PlaneOut[1].u_width*PlaneOut[1].u_height; i++)
-     {
-        *p_buf_dest_u++ = *p_buf_src_uv++;
-        *p_buf_dest_v++ = *p_buf_src_uv++;
-     }
-     return return_code;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
- *                                                  M4VIFI_ImagePlane *PlaneIn,
- *                                                  M4VIFI_ImagePlane *PlaneOut,
- *                                                  M4VSS3GPP_ExternalProgress *pProgress,
- *                                                  M4OSA_UInt32 uiEffectKind)
- *
- * @brief   This function apply a color effect on an input YUV420 planar frame
- * @note
- * @param   pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param   PlaneIn         (IN) Input YUV420 planar
- * @param   PlaneOut        (IN/OUT) Output YUV420 planar
- * @param   pProgress       (IN/OUT) Progress indication (0-100)
- * @param   uiEffectKind    (IN) Unused
- *
- * @return  M4VIFI_OK:  No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
-            M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut,
-            M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind) {
-
-    M4VIFI_Int32 plane_number;
-    M4VIFI_UInt32 i,j;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
-    M4xVSS_ColorStruct* ColorContext = (M4xVSS_ColorStruct*)pFunctionContext;
-
-    for (plane_number = 0; plane_number < 3; plane_number++)
-    {
-        p_buf_src =
-         &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
-
-        p_buf_dest =
-         &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
-        for (i = 0; i < PlaneOut[plane_number].u_height; i++)
-        {
-            /**
-             * Chrominance */
-            if(plane_number==1 || plane_number==2)
-            {
-                //switch ((M4OSA_UInt32)pFunctionContext) // commented because a structure for the effects context exist
-                switch (ColorContext->colorEffectType)
-                {
-                case M4xVSS_kVideoEffectType_BlackAndWhite:
-                    memset((void *)p_buf_dest,128,
-                     PlaneIn[plane_number].u_width);
-                    break;
-                case M4xVSS_kVideoEffectType_Pink:
-                    memset((void *)p_buf_dest,255,
-                     PlaneIn[plane_number].u_width);
-                    break;
-                case M4xVSS_kVideoEffectType_Green:
-                    memset((void *)p_buf_dest,0,
-                     PlaneIn[plane_number].u_width);
-                    break;
-                case M4xVSS_kVideoEffectType_Sepia:
-                    if(plane_number==1)
-                    {
-                        memset((void *)p_buf_dest,117,
-                         PlaneIn[plane_number].u_width);
-                    }
-                    else
-                    {
-                        memset((void *)p_buf_dest,139,
-                         PlaneIn[plane_number].u_width);
-                    }
-                    break;
-                case M4xVSS_kVideoEffectType_Negative:
-                    memcpy((void *)p_buf_dest,
-                     (void *)p_buf_src ,PlaneOut[plane_number].u_width);
-                    break;
-
-                case M4xVSS_kVideoEffectType_ColorRGB16:
-                    {
-                        M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
-
-                        /*first get the r, g, b*/
-                        b = (ColorContext->rgb16ColorData &  0x001f);
-                        g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
-                        r = (ColorContext->rgb16ColorData &  0xf800)>>11;
-
-                        /*keep y, but replace u and v*/
-                        if(plane_number==1)
-                        {
-                            /*then convert to u*/
-                            u = U16(r, g, b);
-                            memset((void *)p_buf_dest,(M4OSA_UInt8)u,
-                             PlaneIn[plane_number].u_width);
-                        }
-                        if(plane_number==2)
-                        {
-                            /*then convert to v*/
-                            v = V16(r, g, b);
-                            memset((void *)p_buf_dest,(M4OSA_UInt8)v,
-                             PlaneIn[plane_number].u_width);
-                        }
-                    }
-                    break;
-                case M4xVSS_kVideoEffectType_Gradient:
-                    {
-                        M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
-
-                        /*first get the r, g, b*/
-                        b = (ColorContext->rgb16ColorData &  0x001f);
-                        g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
-                        r = (ColorContext->rgb16ColorData &  0xf800)>>11;
-
-                        /*for color gradation*/
-                        b = (M4OSA_UInt16)( b - ((b*i)/PlaneIn[plane_number].u_height));
-                        g = (M4OSA_UInt16)(g - ((g*i)/PlaneIn[plane_number].u_height));
-                        r = (M4OSA_UInt16)(r - ((r*i)/PlaneIn[plane_number].u_height));
-
-                        /*keep y, but replace u and v*/
-                        if(plane_number==1)
-                        {
-                            /*then convert to u*/
-                            u = U16(r, g, b);
-                            memset((void *)p_buf_dest,(M4OSA_UInt8)u,
-                             PlaneIn[plane_number].u_width);
-                        }
-                        if(plane_number==2)
-                        {
-                            /*then convert to v*/
-                            v = V16(r, g, b);
-                            memset((void *)p_buf_dest,(M4OSA_UInt8)v,
-                             PlaneIn[plane_number].u_width);
-                        }
-                    }
-                    break;
-                default:
-                    return M4VIFI_INVALID_PARAM;
-                }
-            }
-            /**
-             * Luminance */
-            else
-            {
-                //switch ((M4OSA_UInt32)pFunctionContext)// commented because a structure for the effects context exist
-                switch (ColorContext->colorEffectType)
-                {
-                case M4xVSS_kVideoEffectType_Negative:
-                    for(j=0;j<PlaneOut[plane_number].u_width;j++)
-                    {
-                            p_buf_dest[j] = 255 - p_buf_src[j];
-                    }
-                    break;
-                default:
-                    memcpy((void *)p_buf_dest,
-                     (void *)p_buf_src ,PlaneOut[plane_number].u_width);
-                    break;
-                }
-            }
-            p_buf_src += PlaneIn[plane_number].u_stride;
-            p_buf_dest += PlaneOut[plane_number].u_stride;
-        }
-    }
-
-    return M4VIFI_OK;
-}
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
- *                                                  M4VIFI_ImagePlane *PlaneIn,
- *                                                  M4VIFI_ImagePlane *PlaneOut,
- *                                                  M4VSS3GPP_ExternalProgress *pProgress,
- *                                                  M4OSA_UInt32 uiEffectKind)
- *
- * @brief   This function add a fixed or animated image on an input YUV420 planar frame
- * @note
- * @param   pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param   PlaneIn         (IN) Input YUV420 planar
- * @param   PlaneOut        (IN/OUT) Output YUV420 planar
- * @param   pProgress       (IN/OUT) Progress indication (0-100)
- * @param   uiEffectKind    (IN) Unused
- *
- * @return  M4VIFI_OK:  No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming(
-            M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn[3],
-            M4VIFI_ImagePlane *PlaneOut, M4VSS3GPP_ExternalProgress *pProgress,
-            M4OSA_UInt32 uiEffectKind ) {
-
-    M4VIFI_UInt32 x,y;
-
-    M4VIFI_UInt8 *p_in_Y = PlaneIn[0].pac_data;
-    M4VIFI_UInt8 *p_in_U = PlaneIn[1].pac_data;
-    M4VIFI_UInt8 *p_in_V = PlaneIn[2].pac_data;
-
-    M4xVSS_FramingStruct* Framing = M4OSA_NULL;
-    M4xVSS_FramingStruct* currentFraming = M4OSA_NULL;
-    M4VIFI_UInt8 *FramingRGB = M4OSA_NULL;
-
-    M4VIFI_UInt8 *p_out0;
-    M4VIFI_UInt8 *p_out1;
-    M4VIFI_UInt8 *p_out2;
-
-    M4VIFI_UInt32 topleft[2];
-
-    M4OSA_UInt8 transparent1 =
-     (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
-    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
-
-#ifndef DECODE_GIF_ON_SAVING
-    Framing = (M4xVSS_FramingStruct *)userData;
-    currentFraming = (M4xVSS_FramingStruct *)Framing->pCurrent;
-    FramingRGB = Framing->FramingRgb->pac_data;
-#endif /*DECODE_GIF_ON_SAVING*/
-
-#ifdef DECODE_GIF_ON_SAVING
-    M4OSA_ERR err;
-    Framing =
-     (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
-    if(Framing == M4OSA_NULL)
-    {
-        ((M4xVSS_FramingContext*)userData)->clipTime = pProgress->uiOutputTime;
-        err = M4xVSS_internalDecodeGIF(userData);
-        if(M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming: \
-                Error in M4xVSS_internalDecodeGIF: 0x%x", err);
-            return err;
-        }
-        Framing =
-         (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
-        /* Initializes first GIF time */
-        ((M4xVSS_FramingContext*)userData)->current_gif_time =
-          pProgress->uiOutputTime;
-    }
-    currentFraming = (M4xVSS_FramingStruct *)Framing;
-    FramingRGB = Framing->FramingRgb->pac_data;
-#endif /*DECODE_GIF_ON_SAVING*/
-
-    /**
-     * Initialize input / output plane pointers */
-    p_in_Y += PlaneIn[0].u_topleft;
-    p_in_U += PlaneIn[1].u_topleft;
-    p_in_V += PlaneIn[2].u_topleft;
-
-    p_out0 = PlaneOut[0].pac_data;
-    p_out1 = PlaneOut[1].pac_data;
-    p_out2 = PlaneOut[2].pac_data;
-
-    /**
-     * Depending on time, initialize Framing frame to use */
-    if(Framing->previousClipTime == -1)
-    {
-        Framing->previousClipTime = pProgress->uiOutputTime;
-    }
-
-    /**
-     * If the current clip time has reach the duration of one frame of the framing picture
-     * we need to step to next framing picture */
-#ifdef DECODE_GIF_ON_SAVING
-    if(((M4xVSS_FramingContext*)userData)->b_animated == M4OSA_TRUE)
-    {
-        while((((M4xVSS_FramingContext*)userData)->current_gif_time + currentFraming->duration) < pProgress->uiOutputTime)
-        {
-            ((M4xVSS_FramingContext*)userData)->clipTime =
-             pProgress->uiOutputTime;
-
-            err = M4xVSS_internalDecodeGIF(userData);
-            if(M4NO_ERROR != err)
-            {
-                M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming: Error in M4xVSS_internalDecodeGIF: 0x%x", err);
-                return err;
-            }
-            if(currentFraming->duration != 0)
-            {
-                ((M4xVSS_FramingContext*)userData)->current_gif_time += currentFraming->duration;
-            }
-            else
-            {
-                ((M4xVSS_FramingContext*)userData)->current_gif_time +=
-                 pProgress->uiOutputTime - Framing->previousClipTime;
-            }
-            Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
-            currentFraming = (M4xVSS_FramingStruct *)Framing;
-            FramingRGB = Framing->FramingRgb->pac_data;
-        }
-    }
-#else
-            Framing->pCurrent = currentFraming->pNext;
-            currentFraming = (M4xVSS_FramingStruct*)Framing->pCurrent;
-#endif /*DECODE_GIF_ON_SAVING*/
-
-    Framing->previousClipTime = pProgress->uiOutputTime;
-    FramingRGB = currentFraming->FramingRgb->pac_data;
-    topleft[0] = currentFraming->topleft_x;
-    topleft[1] = currentFraming->topleft_y;
-
-    for( x=0 ;x < PlaneIn[0].u_height ; x++)
-    {
-        for( y=0 ;y < PlaneIn[0].u_width ; y++)
-        {
-            /**
-             * To handle framing with input size != output size
-             * Framing is applyed if coordinates matches between framing/topleft and input plane */
-            if( y < (topleft[0] + currentFraming->FramingYuv[0].u_width)  &&
-                y >= topleft[0] &&
-                x < (topleft[1] + currentFraming->FramingYuv[0].u_height) &&
-                x >= topleft[1])
-            {
-
-                /*Alpha blending support*/
-                M4OSA_Float alphaBlending = 1;
-#ifdef DECODE_GIF_ON_SAVING
-                M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct =
-                 (M4xVSS_internalEffectsAlphaBlending*)((M4xVSS_FramingContext*)userData)->alphaBlendingStruct;
-#else
-                M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct =
-                 (M4xVSS_internalEffectsAlphaBlending*)((M4xVSS_FramingStruct*)userData)->alphaBlendingStruct;
-#endif //#ifdef DECODE_GIF_ON_SAVING
-
-                if(alphaBlendingStruct != M4OSA_NULL)
-                {
-                    if(pProgress->uiProgress < (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10))
-                    {
-                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle - alphaBlendingStruct->m_start)*pProgress->uiProgress/(alphaBlendingStruct->m_fadeInTime*10));
-                        alphaBlending += alphaBlendingStruct->m_start;
-                        alphaBlending /= 100;
-                    }
-                    else if(pProgress->uiProgress >= (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10) && pProgress->uiProgress < 1000 - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
-                    {
-                        alphaBlending = (M4OSA_Float)((M4OSA_Float)alphaBlendingStruct->m_middle/100);
-                    }
-                    else if(pProgress->uiProgress >= 1000 - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
-                    {
-                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle - alphaBlendingStruct->m_end))*(1000 - pProgress->uiProgress)/(alphaBlendingStruct->m_fadeOutTime*10);
-                        alphaBlending += alphaBlendingStruct->m_end;
-                        alphaBlending /= 100;
-                    }
-                }
-
-                /**/
-
-                if((*(FramingRGB)==transparent1) && (*(FramingRGB+1)==transparent2))
-                {
-                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(p_in_Y+y+x*PlaneIn[0].u_stride));
-                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=(*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride));
-                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=(*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride));
-                }
-                else
-                {
-                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(currentFraming->FramingYuv[0].pac_data+(y-topleft[0])+(x-topleft[1])*currentFraming->FramingYuv[0].u_stride))*alphaBlending;
-                    *( p_out0+y+x*PlaneOut[0].u_stride)+=(*(p_in_Y+y+x*PlaneIn[0].u_stride))*(1-alphaBlending);
-                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=(*(currentFraming->FramingYuv[1].pac_data+((y-topleft[0])>>1)+((x-topleft[1])>>1)*currentFraming->FramingYuv[1].u_stride))*alphaBlending;
-                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)+=(*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride))*(1-alphaBlending);
-                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=(*(currentFraming->FramingYuv[2].pac_data+((y-topleft[0])>>1)+((x-topleft[1])>>1)*currentFraming->FramingYuv[2].u_stride))*alphaBlending;
-                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)+=(*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride))*(1-alphaBlending);
-                }
-                if( PlaneIn[0].u_width < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
-                    y == PlaneIn[0].u_width-1)
-                {
-                    FramingRGB = FramingRGB + 2 * (topleft[0] + currentFraming->FramingYuv[0].u_width - PlaneIn[0].u_width + 1);
-                }
-                else
-                {
-                    FramingRGB = FramingRGB + 2;
-                }
-            }
-            /**
-             * Just copy input plane to output plane */
-            else
-            {
-                *( p_out0+y+x*PlaneOut[0].u_stride)=*(p_in_Y+y+x*PlaneIn[0].u_stride);
-                *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride);
-                *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride);
-            }
-        }
-    }
-
-#ifdef DECODE_GIF_ON_SAVING
-    if(pProgress->bIsLast == M4OSA_TRUE
-        && (M4OSA_Bool)((M4xVSS_FramingContext*)userData)->b_IsFileGif == M4OSA_TRUE)
-    {
-        M4xVSS_internalDecodeGIF_Cleaning((M4xVSS_FramingContext*)userData);
-    }
-#endif /*DECODE_GIF_ON_SAVING*/
-    return M4VIFI_OK;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
- *                                                  M4VIFI_ImagePlane *PlaneIn,
- *                                                  M4VIFI_ImagePlane *PlaneOut,
- *                                                  M4VSS3GPP_ExternalProgress *pProgress,
- *                                                  M4OSA_UInt32 uiEffectKind)
- *
- * @brief   This function make a video look as if it was taken in the fifties
- * @note
- * @param   pUserData       (IN) Context
- * @param   pPlaneIn        (IN) Input YUV420 planar
- * @param   pPlaneOut       (IN/OUT) Output YUV420 planar
- * @param   pProgress       (IN/OUT) Progress indication (0-100)
- * @param   uiEffectKind    (IN) Unused
- *
- * @return  M4VIFI_OK:          No error
- * @return  M4ERR_PARAMETER:    pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties(
-    M4OSA_Void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-    M4VIFI_ImagePlane *pPlaneOut, M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiEffectKind )
-{
-    M4VIFI_UInt32 x, y, xShift;
-    M4VIFI_UInt8 *pInY = pPlaneIn[0].pac_data;
-    M4VIFI_UInt8 *pOutY, *pInYbegin;
-    M4VIFI_UInt8 *pInCr,* pOutCr;
-    M4VIFI_Int32 plane_number;
-
-    /* Internal context*/
-    M4xVSS_FiftiesStruct* p_FiftiesData = (M4xVSS_FiftiesStruct *)pUserData;
-
-    /* Initialize input / output plane pointers */
-    pInY += pPlaneIn[0].u_topleft;
-    pOutY = pPlaneOut[0].pac_data;
-    pInYbegin  = pInY;
-
-    /* Initialize the random */
-    if(p_FiftiesData->previousClipTime < 0)
-    {
-        M4OSA_randInit();
-        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
-        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
-        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
-    }
-
-    /* Choose random values if we have reached the duration of a partial effect */
-    else if( (pProgress->uiOutputTime - p_FiftiesData->previousClipTime) > p_FiftiesData->fiftiesEffectDuration)
-    {
-        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
-        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
-        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
-    }
-
-    /* Put in Sepia the chrominance */
-    for (plane_number = 1; plane_number < 3; plane_number++)
-    {
-        pInCr  = pPlaneIn[plane_number].pac_data  + pPlaneIn[plane_number].u_topleft;
-        pOutCr = pPlaneOut[plane_number].pac_data + pPlaneOut[plane_number].u_topleft;
-
-        for (x = 0; x < pPlaneOut[plane_number].u_height; x++)
-        {
-            if (1 == plane_number)
-                memset((void *)pOutCr, 117,pPlaneIn[plane_number].u_width); /* U value */
-            else
-                memset((void *)pOutCr, 139,pPlaneIn[plane_number].u_width); /* V value */
-
-            pInCr  += pPlaneIn[plane_number].u_stride;
-            pOutCr += pPlaneOut[plane_number].u_stride;
-        }
-    }
-
-    /* Compute the new pixels values */
-    for( x = 0 ; x < pPlaneIn[0].u_height ; x++)
-    {
-        M4VIFI_UInt8 *p_outYtmp, *p_inYtmp;
-
-        /* Compute the xShift (random value) */
-        if (0 == (p_FiftiesData->shiftRandomValue % 5 ))
-            xShift = (x + p_FiftiesData->shiftRandomValue ) % (pPlaneIn[0].u_height - 1);
-        else
-            xShift = (x + (pPlaneIn[0].u_height - p_FiftiesData->shiftRandomValue) ) % (pPlaneIn[0].u_height - 1);
-
-        /* Initialize the pointers */
-        p_outYtmp = pOutY + 1;                                    /* yShift of 1 pixel */
-        p_inYtmp  = pInYbegin + (xShift * pPlaneIn[0].u_stride);  /* Apply the xShift */
-
-        for( y = 0 ; y < pPlaneIn[0].u_width ; y++)
-        {
-            /* Set Y value */
-            if (xShift > (pPlaneIn[0].u_height - 4))
-                *p_outYtmp = 40;        /* Add some horizontal black lines between the two parts of the image */
-            else if ( y == p_FiftiesData->stripeRandomValue)
-                *p_outYtmp = 90;        /* Add a random vertical line for the bulk */
-            else
-                *p_outYtmp = *p_inYtmp;
-
-
-            /* Go to the next pixel */
-            p_outYtmp++;
-            p_inYtmp++;
-
-            /* Restart at the beginning of the line for the last pixel*/
-            if (y == (pPlaneIn[0].u_width - 2))
-                p_outYtmp = pOutY;
-        }
-
-        /* Go to the next line */
-        pOutY += pPlaneOut[0].u_stride;
-    }
-
-    return M4VIFI_OK;
-}
-
-unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in,
-                                        M4ViComImagePlane *plane_out,
-                                        unsigned long lum_factor,
-                                        void *user_data)
-{
-    unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
-    unsigned char *p_csrc, *p_cdest, *p_csrc_line, *p_cdest_line;
-    unsigned long pix_src;
-    unsigned long u_outpx, u_outpx2;
-    unsigned long u_width, u_stride, u_stride_out,u_height, pix;
-    long i, j;
-
-    /* copy or filter chroma */
-    u_width = plane_in[1].u_width;
-    u_height = plane_in[1].u_height;
-    u_stride = plane_in[1].u_stride;
-    u_stride_out = plane_out[1].u_stride;
-    p_cdest_line = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
-    p_csrc_line = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
-
-    if (lum_factor > 256)
-    {
-        p_cdest = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
-        p_csrc = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
-        /* copy chroma */
-        for (j = u_height; j != 0; j--)
-        {
-            for (i = u_width; i != 0; i--)
-            {
-                memcpy((void *)p_cdest_line, (void *)p_csrc_line, u_width);
-                memcpy((void *)p_cdest, (void *)p_csrc, u_width);
-            }
-            p_cdest_line += u_stride_out;
-            p_cdest += u_stride_out;
-            p_csrc_line += u_stride;
-            p_csrc += u_stride;
-        }
-    }
-    else
-    {
-        /* filter chroma */
-        pix = (1024 - lum_factor) << 7;
-        for (j = u_height; j != 0; j--)
-        {
-            p_cdest = p_cdest_line;
-            p_csrc = p_csrc_line;
-            for (i = u_width; i != 0; i--)
-            {
-                *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
-            }
-            p_cdest_line += u_stride_out;
-            p_csrc_line += u_stride;
-        }
-        p_cdest_line = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
-        p_csrc_line = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
-        for (j = u_height; j != 0; j--)
-        {
-            p_cdest = p_cdest_line;
-            p_csrc = p_csrc_line;
-            for (i = u_width; i != 0; i--)
-            {
-                *p_cdest++ = ((pix + (*p_csrc & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
-            }
-            p_cdest_line += u_stride_out;
-            p_csrc_line += u_stride;
-        }
-    }
-    /* apply luma factor */
-    u_width = plane_in[0].u_width;
-    u_height = plane_in[0].u_height;
-    u_stride = (plane_in[0].u_stride >> 1);
-    u_stride_out = (plane_out[0].u_stride >> 1);
-    p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
-    p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
-    p_dest_line = p_dest;
-    p_src_line = p_src;
-
-    for (j = u_height; j != 0; j--)
-    {
-        p_dest = p_dest_line;
-        p_src = p_src_line;
-        for (i = (u_width >> 1); i != 0; i--)
-        {
-            pix_src = (unsigned long) *p_src++;
-            pix = pix_src & 0xFF;
-            u_outpx = ((pix * lum_factor) >> LUM_FACTOR_MAX);
-            pix = ((pix_src & 0xFF00) >> 8);
-            u_outpx2 = (((pix * lum_factor) >> LUM_FACTOR_MAX)<< 8) ;
-            *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
-        }
-        p_dest_line += u_stride_out;
-        p_src_line += u_stride;
-    }
-
-    return 0;
-}
-
-/******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
- * @brief   This function converts an RGB565 plane to YUV420 planar
- * @note    It is used only for framing effect
- *          It allocates output YUV planes
- * @param   framingCtx  (IN) The framing struct containing input RGB565 plane
- *
- * @return  M4NO_ERROR: No error
- * @return  M4ERR_PARAMETER: At least one of the function parameters is null
- * @return  M4ERR_ALLOC: Allocation error (no more memory)
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
-{
-    M4OSA_ERR err;
-
-    /**
-     * Allocate output YUV planes */
-    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
-    if(framingCtx->FramingYuv == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
-    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
-    framingCtx->FramingYuv[0].u_topleft = 0;
-    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
-    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char*)"Alloc for the Convertion output YUV");;
-    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
-    framingCtx->FramingYuv[1].u_topleft = 0;
-    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
-    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
-    framingCtx->FramingYuv[2].u_topleft = 0;
-    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
-
-    /**
-     * Convert input RGB 565 to YUV 420 to be able to merge it with output video in framing effect */
-    err = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV: error when converting from RGB to YUV: 0x%x\n", err);
-    }
-
-    framingCtx->duration = 0;
-    framingCtx->previousClipTime = -1;
-    framingCtx->previewOffsetClipTime = -1;
-
-    /**
-     * Only one element in the chained list (no animated image with RGB buffer...) */
-    framingCtx->pCurrent = framingCtx;
-    framingCtx->pNext = framingCtx;
-
-    return M4NO_ERROR;
-}
-
-/******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalConvertRGB888toYUV(M4xVSS_FramingStruct* framingCtx)
- * @brief   This function converts an RGB888 plane to YUV420 planar
- * @note    It is used only for framing effect
- *          It allocates output YUV planes
- * @param   framingCtx  (IN) The framing struct containing input RGB888 plane
- *
- * @return  M4NO_ERROR: No error
- * @return  M4ERR_PARAMETER: At least one of the function parameters is null
- * @return  M4ERR_ALLOC: Allocation error (no more memory)
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_internalConvertRGB888toYUV(M4xVSS_FramingStruct* framingCtx)
-{
-    M4OSA_ERR err;
-
-    /**
-     * Allocate output YUV planes */
-    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
-    if(framingCtx->FramingYuv == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
-    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
-    framingCtx->FramingYuv[0].u_topleft = 0;
-    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
-    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char*)"Alloc for the Convertion output YUV");;
-    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
-    framingCtx->FramingYuv[1].u_topleft = 0;
-    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
-    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
-    framingCtx->FramingYuv[2].u_topleft = 0;
-    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
-
-    /**
-     * Convert input RGB888 to YUV 420 to be able to merge it with output video in framing effect */
-    err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV: error when converting from RGB to YUV: 0x%x\n", err);
-    }
-
-    framingCtx->duration = 0;
-    framingCtx->previousClipTime = -1;
-    framingCtx->previewOffsetClipTime = -1;
-
-    /**
-     * Only one element in the chained list (no animated image with RGB buffer...) */
-    framingCtx->pCurrent = framingCtx;
-    framingCtx->pNext = framingCtx;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4VIFI_UInt8 M4VIFI_RGB565toYUV420 (void *pUserData,
- *                                   M4VIFI_ImagePlane *pPlaneIn,
- *                                   M4VIFI_ImagePlane *pPlaneOut)
- * @author  Patrice Martinez / Philips Digital Networks - MP4Net
- * @brief   transform RGB565 image to a YUV420 image.
- * @note    Convert RGB565 to YUV420,
- *          Loop on each row ( 2 rows by 2 rows )
- *              Loop on each column ( 2 col by 2 col )
- *                  Get 4 RGB samples from input data and build 4 output Y samples
- *                  and each single U & V data
- *              end loop on col
- *          end loop on row
- * @param   pUserData: (IN) User Specific Data
- * @param   pPlaneIn: (IN) Pointer to RGB565 Plane
- * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
- ******************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-                                                      M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
-    M4VIFI_UInt32   u32_stride_rgb, u32_stride_2rgb;
-    M4VIFI_UInt32   u32_col, u32_row;
-
-    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
-    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
-    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
-    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
-    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
-    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
-    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
-    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
-    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
-    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
-    M4VIFI_UInt8 count_null=0;
-
-    /* Check planes height are appropriate */
-    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
-        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
-        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    /* Check planes width are appropriate */
-    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
-        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
-        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Set the pointer to the beginning of the output data buffers */
-    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
-    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
-    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
-
-    /* Set the pointer to the beginning of the input data buffers */
-    pu8_rgbn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
-
-    /* Get the size of the output image */
-    u32_width = pPlaneOut[0].u_width;
-    u32_height = pPlaneOut[0].u_height;
-
-    /* Set the size of the memory jumps corresponding to row jump in each output plane */
-    u32_stride_Y = pPlaneOut[0].u_stride;
-    u32_stride2_Y = u32_stride_Y << 1;
-    u32_stride_U = pPlaneOut[1].u_stride;
-    u32_stride_V = pPlaneOut[2].u_stride;
-
-    /* Set the size of the memory jumps corresponding to row jump in input plane */
-    u32_stride_rgb = pPlaneIn->u_stride;
-    u32_stride_2rgb = u32_stride_rgb << 1;
-
-
-    /* Loop on each row of the output image, input coordinates are estimated from output ones */
-    /* Two YUV rows are computed at each pass */
-    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
-    {
-        /* Current Y plane row pointers */
-        pu8_yn = pu8_y_data;
-        /* Next Y plane row pointers */
-        pu8_ys = pu8_yn + u32_stride_Y;
-        /* Current U plane row pointer */
-        pu8_u = pu8_u_data;
-        /* Current V plane row pointer */
-        pu8_v = pu8_v_data;
-
-        pu8_rgbn = pu8_rgbn_data;
-
-        /* Loop on each column of the output image */
-        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* Get four RGB 565 samples from input data */
-            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
-            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
-            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
-            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
-
-            /* Unpack RGB565 to 8bit R, G, B */
-            /* (x,y) */
-            GET_RGB565(i32_b00,i32_g00,i32_r00,u16_pix1);
-            /* (x+1,y) */
-            GET_RGB565(i32_b10,i32_g10,i32_r10,u16_pix2);
-            /* (x,y+1) */
-            GET_RGB565(i32_b01,i32_g01,i32_r01,u16_pix3);
-            /* (x+1,y+1) */
-            GET_RGB565(i32_b11,i32_g11,i32_r11,u16_pix4);
-            /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
-            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
-            {
-                i32_b00 = 31;
-                i32_r00 = 31;
-            }
-            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
-            {
-                i32_b10 = 31;
-                i32_r10 = 31;
-            }
-            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
-            {
-                i32_b01 = 31;
-                i32_r01 = 31;
-            }
-            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
-            {
-                i32_b11 = 31;
-                i32_r11 = 31;
-            }
-            /* Convert RGB value to YUV */
-            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
-            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
-            /* luminance value */
-            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
-
-            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
-            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
-            /* luminance value */
-            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
-
-            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
-            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
-            /* luminance value */
-            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
-
-            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
-            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
-            /* luminance value */
-            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
-
-            /* Store luminance data */
-            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
-            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
-            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
-            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
-            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
-            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
-            /* Prepare for next column */
-            pu8_rgbn += (CST_RGB_16_SIZE<<1);
-            /* Update current Y plane line pointer*/
-            pu8_yn += 2;
-            /* Update next Y plane line pointer*/
-            pu8_ys += 2;
-            /* Update U plane line pointer*/
-            pu8_u ++;
-            /* Update V plane line pointer*/
-            pu8_v ++;
-        } /* End of horizontal scanning */
-
-        /* Prepare pointers for the next row */
-        pu8_y_data += u32_stride2_Y;
-        pu8_u_data += u32_stride_U;
-        pu8_v_data += u32_stride_V;
-        pu8_rgbn_data += u32_stride_2rgb;
-
-
-    } /* End of vertical scanning */
-
-    return M4VIFI_OK;
-}
-
-/***************************************************************************
-Proto:
-M4VIFI_UInt8    M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
-Author:     Patrice Martinez / Philips Digital Networks - MP4Net
-Purpose:    filling of the YUV420 plane from a BGR24 plane
-Abstract:   Loop on each row ( 2 rows by 2 rows )
-                Loop on each column ( 2 col by 2 col )
-                    Get 4 BGR samples from input data and build 4 output Y samples and each single U & V data
-                end loop on col
-            end loop on row
-
-In:         RGB24 plane
-InOut:      none
-Out:        array of 3 M4VIFI_ImagePlane structures
-Modified:   ML: RGB function modified to BGR.
-***************************************************************************/
-M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3])
-{
-
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V, u32_stride_rgb, u32_stride_2rgb;
-    M4VIFI_UInt32   u32_col, u32_row;
-
-    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
-    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
-    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
-    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
-    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
-    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
-    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
-    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
-    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
-
-    /* check sizes */
-    if( (PlaneIn->u_height != PlaneOut[0].u_height)         ||
-        (PlaneOut[0].u_height != (PlaneOut[1].u_height<<1)) ||
-        (PlaneOut[0].u_height != (PlaneOut[2].u_height<<1)))
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-
-    if( (PlaneIn->u_width != PlaneOut[0].u_width)       ||
-        (PlaneOut[0].u_width != (PlaneOut[1].u_width<<1))   ||
-        (PlaneOut[0].u_width != (PlaneOut[2].u_width<<1)))
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-
-
-    /* set the pointer to the beginning of the output data buffers */
-    pu8_y_data  = PlaneOut[0].pac_data + PlaneOut[0].u_topleft;
-    pu8_u_data  = PlaneOut[1].pac_data + PlaneOut[1].u_topleft;
-    pu8_v_data  = PlaneOut[2].pac_data + PlaneOut[2].u_topleft;
-
-    /* idem for input buffer */
-    pu8_rgbn_data   = PlaneIn->pac_data + PlaneIn->u_topleft;
-
-    /* get the size of the output image */
-    u32_width   = PlaneOut[0].u_width;
-    u32_height  = PlaneOut[0].u_height;
-
-    /* set the size of the memory jumps corresponding to row jump in each output plane */
-    u32_stride_Y = PlaneOut[0].u_stride;
-    u32_stride2_Y= u32_stride_Y << 1;
-    u32_stride_U = PlaneOut[1].u_stride;
-    u32_stride_V = PlaneOut[2].u_stride;
-
-    /* idem for input plane */
-    u32_stride_rgb = PlaneIn->u_stride;
-    u32_stride_2rgb = u32_stride_rgb << 1;
-
-    /* loop on each row of the output image, input coordinates are estimated from output ones */
-    /* two YUV rows are computed at each pass */
-    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
-    {
-        /* update working pointers */
-        pu8_yn  = pu8_y_data;
-        pu8_ys  = pu8_yn + u32_stride_Y;
-
-        pu8_u   = pu8_u_data;
-        pu8_v   = pu8_v_data;
-
-        pu8_rgbn= pu8_rgbn_data;
-
-        /* loop on each column of the output image*/
-        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* get RGB samples of 4 pixels */
-            GET_RGB24(i32_r00, i32_g00, i32_b00, pu8_rgbn, 0);
-            GET_RGB24(i32_r10, i32_g10, i32_b10, pu8_rgbn, CST_RGB_24_SIZE);
-            GET_RGB24(i32_r01, i32_g01, i32_b01, pu8_rgbn, u32_stride_rgb);
-            GET_RGB24(i32_r11, i32_g11, i32_b11, pu8_rgbn, u32_stride_rgb + CST_RGB_24_SIZE);
-
-            i32_u00 = U24(i32_r00, i32_g00, i32_b00);
-            i32_v00 = V24(i32_r00, i32_g00, i32_b00);
-            i32_y00 = Y24(i32_r00, i32_g00, i32_b00);       /* matrix luminance */
-            pu8_yn[0]= (M4VIFI_UInt8)i32_y00;
-
-            i32_u10 = U24(i32_r10, i32_g10, i32_b10);
-            i32_v10 = V24(i32_r10, i32_g10, i32_b10);
-            i32_y10 = Y24(i32_r10, i32_g10, i32_b10);
-            pu8_yn[1]= (M4VIFI_UInt8)i32_y10;
-
-            i32_u01 = U24(i32_r01, i32_g01, i32_b01);
-            i32_v01 = V24(i32_r01, i32_g01, i32_b01);
-            i32_y01 = Y24(i32_r01, i32_g01, i32_b01);
-            pu8_ys[0]= (M4VIFI_UInt8)i32_y01;
-
-            i32_u11 = U24(i32_r11, i32_g11, i32_b11);
-            i32_v11 = V24(i32_r11, i32_g11, i32_b11);
-            i32_y11 = Y24(i32_r11, i32_g11, i32_b11);
-            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
-
-            *pu8_u  = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
-            *pu8_v  = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
-
-            pu8_rgbn    +=  (CST_RGB_24_SIZE<<1);
-            pu8_yn      += 2;
-            pu8_ys      += 2;
-
-            pu8_u ++;
-            pu8_v ++;
-        } /* end of horizontal scanning */
-
-        pu8_y_data      += u32_stride2_Y;
-        pu8_u_data      += u32_stride_U;
-        pu8_v_data      += u32_stride_V;
-        pu8_rgbn_data   += u32_stride_2rgb;
-
-
-    } /* End of vertical scanning */
-
-    return M4VIFI_OK;
-}
-
-/** YUV420 to YUV420 */
-/**
- *******************************************************************************************
- * M4VIFI_UInt8 M4VIFI_YUV420toYUV420 (void *pUserData,
- *                                     M4VIFI_ImagePlane *pPlaneIn,
- *                                     M4VIFI_ImagePlane *pPlaneOut)
- * @brief   Transform YUV420 image to a YUV420 image.
- * @param   pUserData: (IN) User Specific Data (Unused - could be NULL)
- * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
- * @param   pPlaneOut: (OUT) Pointer to YUV Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in plane height
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in plane width
- *******************************************************************************************
- */
-
-M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data, M4VIFI_ImagePlane PlaneIn[3], M4VIFI_ImagePlane *PlaneOut )
-{
-    M4VIFI_Int32 plane_number;
-    M4VIFI_UInt32 i;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
-
-    for (plane_number = 0; plane_number < 3; plane_number++)
-    {
-        p_buf_src = &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
-        p_buf_dest = &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
-        for (i = 0; i < PlaneOut[plane_number].u_height; i++)
-        {
-            memcpy((void *)p_buf_dest, (void *)p_buf_src ,PlaneOut[plane_number].u_width);
-            p_buf_src += PlaneIn[plane_number].u_stride;
-            p_buf_dest += PlaneOut[plane_number].u_stride;
-        }
-    }
-    return M4VIFI_OK;
-}
-
-/**
- ***********************************************************************************************
- * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
- *                                                                  M4VIFI_ImagePlane *pPlaneOut)
- * @author  David Dana (PHILIPS Software)
- * @brief   Resizes YUV420 Planar plane.
- * @note    Basic structure of the function
- *          Loop on each row (step 2)
- *              Loop on each column (step 2)
- *                  Get four Y samples and 1 U & V sample
- *                  Resize the Y with corresponing U and V samples
- *                  Place the YUV in the ouput plane
- *              end loop column
- *          end loop row
- *          For resizing bilinear interpolation linearly interpolates along
- *          each row, and then uses that result in a linear interpolation down each column.
- *          Each estimated pixel in the output image is a weighted
- *          combination of its four neighbours. The ratio of compression
- *          or dilatation is estimated using input and output sizes.
- * @param   pUserData: (IN) User Data
- * @param   pPlaneIn: (IN) Pointer to YUV420 (Planar) plane buffer
- * @param   pPlaneOut: (OUT) Pointer to YUV420 (Planar) plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
- ***********************************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-                                                                M4VIFI_ImagePlane *pPlaneIn,
-                                                                M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt8    *pu8_data_in, *pu8_data_out, *pu8dum;
-    M4VIFI_UInt32   u32_plane;
-    M4VIFI_UInt32   u32_width_in, u32_width_out, u32_height_in, u32_height_out;
-    M4VIFI_UInt32   u32_stride_in, u32_stride_out;
-    M4VIFI_UInt32   u32_x_inc, u32_y_inc;
-    M4VIFI_UInt32   u32_x_accum, u32_y_accum, u32_x_accum_start;
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_y_frac;
-    M4VIFI_UInt32   u32_x_frac;
-    M4VIFI_UInt32   u32_temp_value;
-    M4VIFI_UInt8    *pu8_src_top;
-    M4VIFI_UInt8    *pu8_src_bottom;
-
-    M4VIFI_UInt8    u8Wflag = 0;
-    M4VIFI_UInt8    u8Hflag = 0;
-    M4VIFI_UInt32   loop = 0;
-
-
-    /*
-     If input width is equal to output width and input height equal to
-     output height then M4VIFI_YUV420toYUV420 is called.
-    */
-    if ((pPlaneIn[0].u_height == pPlaneOut[0].u_height) &&
-              (pPlaneIn[0].u_width == pPlaneOut[0].u_width))
-    {
-        return M4VIFI_YUV420toYUV420(pUserData, pPlaneIn, pPlaneOut);
-    }
-
-    /* Check for the YUV width and height are even */
-    if ((IS_EVEN(pPlaneIn[0].u_height) == FALSE)    ||
-        (IS_EVEN(pPlaneOut[0].u_height) == FALSE))
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    if ((IS_EVEN(pPlaneIn[0].u_width) == FALSE) ||
-        (IS_EVEN(pPlaneOut[0].u_width) == FALSE))
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Loop on planes */
-    for(u32_plane = 0;u32_plane < PLANES;u32_plane++)
-    {
-        /* Set the working pointers at the beginning of the input/output data field */
-        pu8_data_in     = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
-        pu8_data_out    = pPlaneOut[u32_plane].pac_data + pPlaneOut[u32_plane].u_topleft;
-
-        /* Get the memory jump corresponding to a row jump */
-        u32_stride_in   = pPlaneIn[u32_plane].u_stride;
-        u32_stride_out  = pPlaneOut[u32_plane].u_stride;
-
-        /* Set the bounds of the active image */
-        u32_width_in    = pPlaneIn[u32_plane].u_width;
-        u32_height_in   = pPlaneIn[u32_plane].u_height;
-
-        u32_width_out   = pPlaneOut[u32_plane].u_width;
-        u32_height_out  = pPlaneOut[u32_plane].u_height;
-
-        /*
-        For the case , width_out = width_in , set the flag to avoid
-        accessing one column beyond the input width.In this case the last
-        column is replicated for processing
-        */
-        if (u32_width_out == u32_width_in) {
-            u32_width_out = u32_width_out-1;
-            u8Wflag = 1;
-        }
-
-        /* Compute horizontal ratio between src and destination width.*/
-        if (u32_width_out >= u32_width_in)
-        {
-            u32_x_inc   = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
-        }
-        else
-        {
-            u32_x_inc   = (u32_width_in * MAX_SHORT) / (u32_width_out);
-        }
-
-        /*
-        For the case , height_out = height_in , set the flag to avoid
-        accessing one row beyond the input height.In this case the last
-        row is replicated for processing
-        */
-        if (u32_height_out == u32_height_in) {
-            u32_height_out = u32_height_out-1;
-            u8Hflag = 1;
-        }
-
-        /* Compute vertical ratio between src and destination height.*/
-        if (u32_height_out >= u32_height_in)
-        {
-            u32_y_inc   = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
-        }
-        else
-        {
-            u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
-        }
-
-        /*
-        Calculate initial accumulator value : u32_y_accum_start.
-        u32_y_accum_start is coded on 15 bits, and represents a value
-        between 0 and 0.5
-        */
-        if (u32_y_inc >= MAX_SHORT)
-        {
-        /*
-        Keep the fractionnal part, assimung that integer  part is coded
-        on the 16 high bits and the fractional on the 15 low bits
-        */
-            u32_y_accum = u32_y_inc & 0xffff;
-
-            if (!u32_y_accum)
-            {
-                u32_y_accum = MAX_SHORT;
-            }
-
-            u32_y_accum >>= 1;
-        }
-        else
-        {
-            u32_y_accum = 0;
-        }
-
-
-        /*
-        Calculate initial accumulator value : u32_x_accum_start.
-        u32_x_accum_start is coded on 15 bits, and represents a value
-        between 0 and 0.5
-        */
-        if (u32_x_inc >= MAX_SHORT)
-        {
-            u32_x_accum_start = u32_x_inc & 0xffff;
-
-            if (!u32_x_accum_start)
-            {
-                u32_x_accum_start = MAX_SHORT;
-            }
-
-            u32_x_accum_start >>= 1;
-        }
-        else
-        {
-            u32_x_accum_start = 0;
-        }
-
-        u32_height = u32_height_out;
-
-        /*
-        Bilinear interpolation linearly interpolates along each row, and
-        then uses that result in a linear interpolation donw each column.
-        Each estimated pixel in the output image is a weighted combination
-        of its four neighbours according to the formula:
-        F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+
-        f(p+&,q+1)R(1-a)R(b-1) with  R(x) = / x+1  -1 =< x =< 0 \ 1-x
-        0 =< x =< 1 and a (resp. b)weighting coefficient is the distance
-        from the nearest neighbor in the p (resp. q) direction
-        */
-
-        do { /* Scan all the row */
-
-            /* Vertical weight factor */
-            u32_y_frac = (u32_y_accum>>12)&15;
-
-            /* Reinit accumulator */
-            u32_x_accum = u32_x_accum_start;
-
-            u32_width = u32_width_out;
-
-            do { /* Scan along each row */
-                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
-                pu8_src_bottom = pu8_src_top + u32_stride_in;
-                u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
-
-                /* Weighted combination */
-                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                /* Update horizontal accumulator */
-                u32_x_accum += u32_x_inc;
-            } while(--u32_width);
-
-            /*
-               This u8Wflag flag gets in to effect if input and output
-               width is same, and height may be different. So previous
-               pixel is replicated here
-            */
-            if (u8Wflag) {
-                *pu8_data_out = (M4VIFI_UInt8)u32_temp_value;
-            }
-
-            pu8dum = (pu8_data_out-u32_width_out);
-            pu8_data_out = pu8_data_out + u32_stride_out - u32_width_out;
-
-            /* Update vertical accumulator */
-            u32_y_accum += u32_y_inc;
-            if (u32_y_accum>>16) {
-                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * u32_stride_in;
-                u32_y_accum &= 0xffff;
-            }
-        } while(--u32_height);
-
-        /*
-        This u8Hflag flag gets in to effect if input and output height
-        is same, and width may be different. So previous pixel row is
-        replicated here
-        */
-        if (u8Hflag) {
-            for(loop =0; loop < (u32_width_out+u8Wflag); loop++) {
-                *pu8_data_out++ = (M4VIFI_UInt8)*pu8dum++;
-            }
-        }
-    }
-
-    return M4VIFI_OK;
-}
-
-M4OSA_ERR applyRenderingMode(M4VIFI_ImagePlane* pPlaneIn, M4VIFI_ImagePlane* pPlaneOut, M4xVSS_MediaRendering mediaRendering)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    if(mediaRendering == M4xVSS_kResizing)
-    {
-        /**
-         * Call the resize filter. From the intermediate frame to the encoder image plane */
-        err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL, pPlaneIn, pPlaneOut);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("applyRenderingMode: M4ViFilResizeBilinearYUV420toYUV420 returns 0x%x!", err);
-            return err;
-        }
-    }
-    else
-    {
-        M4AIR_Params Params;
-        M4OSA_Context m_air_context;
-        M4VIFI_ImagePlane pImagePlanesTemp[3];
-        M4VIFI_ImagePlane* pPlaneTemp;
-        M4OSA_UInt8* pOutPlaneY = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
-        M4OSA_UInt8* pOutPlaneU = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
-        M4OSA_UInt8* pOutPlaneV = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
-        M4OSA_UInt8* pInPlaneY = NULL;
-        M4OSA_UInt8* pInPlaneU = NULL;
-        M4OSA_UInt8* pInPlaneV = NULL;
-        M4OSA_UInt32 i;
-
-        /*to keep media aspect ratio*/
-        /*Initialize AIR Params*/
-        Params.m_inputCoord.m_x = 0;
-        Params.m_inputCoord.m_y = 0;
-        Params.m_inputSize.m_height = pPlaneIn->u_height;
-        Params.m_inputSize.m_width = pPlaneIn->u_width;
-        Params.m_outputSize.m_width = pPlaneOut->u_width;
-        Params.m_outputSize.m_height = pPlaneOut->u_height;
-        Params.m_bOutputStripe = M4OSA_FALSE;
-        Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-
-        /**
-        Media rendering: Black borders*/
-        if(mediaRendering == M4xVSS_kBlackBorders)
-        {
-            memset((void *)pPlaneOut[0].pac_data,Y_PLANE_BORDER_VALUE,(pPlaneOut[0].u_height*pPlaneOut[0].u_stride));
-            memset((void *)pPlaneOut[1].pac_data,U_PLANE_BORDER_VALUE,(pPlaneOut[1].u_height*pPlaneOut[1].u_stride));
-            memset((void *)pPlaneOut[2].pac_data,V_PLANE_BORDER_VALUE,(pPlaneOut[2].u_height*pPlaneOut[2].u_stride));
-
-            pImagePlanesTemp[0].u_width = pPlaneOut[0].u_width;
-            pImagePlanesTemp[0].u_height = pPlaneOut[0].u_height;
-            pImagePlanesTemp[0].u_stride = pPlaneOut[0].u_width;
-            pImagePlanesTemp[0].u_topleft = 0;
-            pImagePlanesTemp[0].pac_data = M4OSA_NULL;
-
-            pImagePlanesTemp[1].u_width = pPlaneOut[1].u_width;
-            pImagePlanesTemp[1].u_height = pPlaneOut[1].u_height;
-            pImagePlanesTemp[1].u_stride = pPlaneOut[1].u_width;
-            pImagePlanesTemp[1].u_topleft = 0;
-            pImagePlanesTemp[1].pac_data = M4OSA_NULL;
-
-            pImagePlanesTemp[2].u_width = pPlaneOut[2].u_width;
-            pImagePlanesTemp[2].u_height = pPlaneOut[2].u_height;
-            pImagePlanesTemp[2].u_stride = pPlaneOut[2].u_width;
-            pImagePlanesTemp[2].u_topleft = 0;
-            pImagePlanesTemp[2].pac_data = M4OSA_NULL;
-
-            /* Allocates plan in local image plane structure */
-            pImagePlanesTemp[0].pac_data = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[0].u_width * pImagePlanesTemp[0].u_height, M4VS, (M4OSA_Char*)"applyRenderingMode: temporary plane bufferY") ;
-            if(pImagePlanesTemp[0].pac_data == M4OSA_NULL)
-            {
-                M4OSA_TRACE1_0("Error alloc in applyRenderingMode");
-                return M4ERR_ALLOC;
-            }
-            pImagePlanesTemp[1].pac_data = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[1].u_width * pImagePlanesTemp[1].u_height, M4VS, (M4OSA_Char*)"applyRenderingMode: temporary plane bufferU") ;
-            if(pImagePlanesTemp[1].pac_data == M4OSA_NULL)
-            {
-
-                M4OSA_TRACE1_0("Error alloc in applyRenderingMode");
-                return M4ERR_ALLOC;
-            }
-            pImagePlanesTemp[2].pac_data = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[2].u_width * pImagePlanesTemp[2].u_height, M4VS, (M4OSA_Char*)"applyRenderingMode: temporary plane bufferV") ;
-            if(pImagePlanesTemp[2].pac_data == M4OSA_NULL)
-            {
-
-                M4OSA_TRACE1_0("Error alloc in applyRenderingMode");
-                return M4ERR_ALLOC;
-            }
-
-            pInPlaneY = pImagePlanesTemp[0].pac_data ;
-            pInPlaneU = pImagePlanesTemp[1].pac_data ;
-            pInPlaneV = pImagePlanesTemp[2].pac_data ;
-
-            memset((void *)pImagePlanesTemp[0].pac_data,Y_PLANE_BORDER_VALUE,(pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride));
-            memset((void *)pImagePlanesTemp[1].pac_data,U_PLANE_BORDER_VALUE,(pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride));
-            memset((void *)pImagePlanesTemp[2].pac_data,V_PLANE_BORDER_VALUE,(pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride));
-
-            if((M4OSA_UInt32)((pPlaneIn->u_height * pPlaneOut->u_width) /pPlaneIn->u_width) <= pPlaneOut->u_height)//Params.m_inputSize.m_height < Params.m_inputSize.m_width)
-            {
-                /*it is height so black borders will be on the top and on the bottom side*/
-                Params.m_outputSize.m_width = pPlaneOut->u_width;
-                Params.m_outputSize.m_height = (M4OSA_UInt32)((pPlaneIn->u_height * pPlaneOut->u_width) /pPlaneIn->u_width);
-                /*number of lines at the top*/
-                pImagePlanesTemp[0].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_height-Params.m_outputSize.m_height)>>1))*pImagePlanesTemp[0].u_stride;
-                pImagePlanesTemp[0].u_height = Params.m_outputSize.m_height;
-                pImagePlanesTemp[1].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height-(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanesTemp[1].u_stride;
-                pImagePlanesTemp[1].u_height = Params.m_outputSize.m_height>>1;
-                pImagePlanesTemp[2].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height-(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanesTemp[2].u_stride;
-                pImagePlanesTemp[2].u_height = Params.m_outputSize.m_height>>1;
-            }
-            else
-            {
-                /*it is width so black borders will be on the left and right side*/
-                Params.m_outputSize.m_height = pPlaneOut->u_height;
-                Params.m_outputSize.m_width = (M4OSA_UInt32)((pPlaneIn->u_width * pPlaneOut->u_height) /pPlaneIn->u_height);
-
-                pImagePlanesTemp[0].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width-Params.m_outputSize.m_width)>>1));
-                pImagePlanesTemp[0].u_width = Params.m_outputSize.m_width;
-                pImagePlanesTemp[1].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width-(Params.m_outputSize.m_width>>1)))>>1);
-                pImagePlanesTemp[1].u_width = Params.m_outputSize.m_width>>1;
-                pImagePlanesTemp[2].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width-(Params.m_outputSize.m_width>>1)))>>1);
-                pImagePlanesTemp[2].u_width = Params.m_outputSize.m_width>>1;
-            }
-
-            /*Width and height have to be even*/
-            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
-            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
-            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
-            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
-            pImagePlanesTemp[0].u_width = (pImagePlanesTemp[0].u_width>>1)<<1;
-            pImagePlanesTemp[1].u_width = (pImagePlanesTemp[1].u_width>>1)<<1;
-            pImagePlanesTemp[2].u_width = (pImagePlanesTemp[2].u_width>>1)<<1;
-            pImagePlanesTemp[0].u_height = (pImagePlanesTemp[0].u_height>>1)<<1;
-            pImagePlanesTemp[1].u_height = (pImagePlanesTemp[1].u_height>>1)<<1;
-            pImagePlanesTemp[2].u_height = (pImagePlanesTemp[2].u_height>>1)<<1;
-
-            /*Check that values are coherent*/
-            if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
-            {
-                Params.m_inputSize.m_width = Params.m_outputSize.m_width;
-            }
-            else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
-            {
-                Params.m_inputSize.m_height = Params.m_outputSize.m_height;
-            }
-            pPlaneTemp = pImagePlanesTemp;
-
-
-        }
-
-        /**
-        Media rendering: Cropping*/
-        if(mediaRendering == M4xVSS_kCropping)
-        {
-            Params.m_outputSize.m_height = pPlaneOut->u_height;
-            Params.m_outputSize.m_width = pPlaneOut->u_width;
-            if((Params.m_outputSize.m_height * Params.m_inputSize.m_width) /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
-            {
-                /*height will be cropped*/
-                Params.m_inputSize.m_height = (M4OSA_UInt32)((Params.m_outputSize.m_height * Params.m_inputSize.m_width) /Params.m_outputSize.m_width);
-                Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
-                Params.m_inputCoord.m_y = (M4OSA_Int32)((M4OSA_Int32)((pPlaneIn->u_height - Params.m_inputSize.m_height))>>1);
-            }
-            else
-            {
-                /*width will be cropped*/
-                Params.m_inputSize.m_width = (M4OSA_UInt32)((Params.m_outputSize.m_width * Params.m_inputSize.m_height) /Params.m_outputSize.m_height);
-                Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
-                Params.m_inputCoord.m_x = (M4OSA_Int32)((M4OSA_Int32)((pPlaneIn->u_width - Params.m_inputSize.m_width))>>1);
-            }
-            pPlaneTemp = pPlaneOut;
-        }
-
-        /**
-         * Call AIR functions */
-        err = M4AIR_create(&m_air_context, M4AIR_kYUV420P);
-        if(err != M4NO_ERROR)
-        {
-
-            M4OSA_TRACE1_1("applyRenderingMode: Error when initializing AIR: 0x%x", err);
-            for(i=0; i<3; i++)
-            {
-                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
-                {
-                    free(pImagePlanesTemp[i].pac_data);
-                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
-                }
-            }
-            return err;
-        }
-
-
-        err = M4AIR_configure(m_air_context, &Params);
-        if(err != M4NO_ERROR)
-        {
-
-            M4OSA_TRACE1_1("applyRenderingMode: Error when configuring AIR: 0x%x", err);
-            M4AIR_cleanUp(m_air_context);
-            for(i=0; i<3; i++)
-            {
-                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
-                {
-                    free(pImagePlanesTemp[i].pac_data);
-                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
-                }
-            }
-            return err;
-        }
-
-        err = M4AIR_get(m_air_context, pPlaneIn, pPlaneTemp);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("applyRenderingMode: Error when getting AIR plane: 0x%x", err);
-            M4AIR_cleanUp(m_air_context);
-            for(i=0; i<3; i++)
-            {
-                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
-                {
-                    free(pImagePlanesTemp[i].pac_data);
-                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
-                }
-            }
-            return err;
-        }
-
-        if(mediaRendering == M4xVSS_kBlackBorders)
-        {
-            for(i=0; i<pPlaneOut[0].u_height; i++)
-            {
-                memcpy((void *)pOutPlaneY, (void *)pInPlaneY, pPlaneOut[0].u_width);
-                pInPlaneY += pPlaneOut[0].u_width;
-                pOutPlaneY += pPlaneOut[0].u_stride;
-            }
-            for(i=0; i<pPlaneOut[1].u_height; i++)
-            {
-                memcpy((void *)pOutPlaneU, (void *)pInPlaneU, pPlaneOut[1].u_width);
-                pInPlaneU += pPlaneOut[1].u_width;
-                pOutPlaneU += pPlaneOut[1].u_stride;
-            }
-            for(i=0; i<pPlaneOut[2].u_height; i++)
-            {
-                memcpy((void *)pOutPlaneV, (void *)pInPlaneV, pPlaneOut[2].u_width);
-                pInPlaneV += pPlaneOut[2].u_width;
-                pOutPlaneV += pPlaneOut[2].u_stride;
-            }
-
-            for(i=0; i<3; i++)
-            {
-                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
-                {
-                    free(pImagePlanesTemp[i].pac_data);
-                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
-                }
-            }
-        }
-
-        if (m_air_context != M4OSA_NULL) {
-            M4AIR_cleanUp(m_air_context);
-            m_air_context = M4OSA_NULL;
-        }
-    }
-
-    return err;
-}
-
-//TODO: remove this code after link with videoartist lib
-/* M4AIR code*/
-#define M4AIR_YUV420_FORMAT_SUPPORTED
-#define M4AIR_YUV420A_FORMAT_SUPPORTED
-
-/************************* COMPILATION CHECKS ***************************/
-#ifndef M4AIR_YUV420_FORMAT_SUPPORTED
-#ifndef M4AIR_BGR565_FORMAT_SUPPORTED
-#ifndef M4AIR_RGB565_FORMAT_SUPPORTED
-#ifndef M4AIR_BGR888_FORMAT_SUPPORTED
-#ifndef M4AIR_RGB888_FORMAT_SUPPORTED
-#ifndef M4AIR_JPG_FORMAT_SUPPORTED
-
-#error "Please define at least one input format for the AIR component"
-
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-
-/************************ M4AIR INTERNAL TYPES DEFINITIONS ***********************/
-
-/**
- ******************************************************************************
- * enum         M4AIR_States
- * @brief       The following enumeration defines the internal states of the AIR.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4AIR_kCreated,         /**< State after M4AIR_create has been called */
-    M4AIR_kConfigured           /**< State after M4AIR_configure has been called */
-}M4AIR_States;
-
-
-/**
- ******************************************************************************
- * struct       M4AIR_InternalContext
- * @brief       The following structure is the internal context of the AIR.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4AIR_States                m_state;            /**< Internal state */
-    M4AIR_InputFormatType   m_inputFormat;      /**< Input format like YUV420Planar, RGB565, JPG, etc ... */
-    M4AIR_Params            m_params;           /**< Current input Parameter of  the processing */
-    M4OSA_UInt32            u32_x_inc[4];       /**< ratio between input and ouput width for YUV */
-    M4OSA_UInt32            u32_y_inc[4];       /**< ratio between input and ouput height for YUV */
-    M4OSA_UInt32            u32_x_accum_start[4];   /**< horizontal initial accumulator value */
-    M4OSA_UInt32            u32_y_accum_start[4];   /**< Vertical initial accumulator value */
-    M4OSA_UInt32            u32_x_accum[4];     /**< save of horizontal accumulator value */
-    M4OSA_UInt32            u32_y_accum[4];     /**< save of vertical accumulator value */
-    M4OSA_UInt8*            pu8_data_in[4];         /**< Save of input plane pointers in case of stripe mode */
-    M4OSA_UInt32            m_procRows;         /**< Number of processed rows, used in stripe mode only */
-    M4OSA_Bool              m_bOnlyCopy;            /**< Flag to know if we just perform a copy or a bilinear interpolation */
-    M4OSA_Bool              m_bFlipX;               /**< Depend on output orientation, used during processing to revert processing order in X coordinates */
-    M4OSA_Bool              m_bFlipY;               /**< Depend on output orientation, used during processing to revert processing order in Y coordinates */
-    M4OSA_Bool              m_bRevertXY;            /**< Depend on output orientation, used during processing to revert X and Y processing order (+-90° rotation) */
-}M4AIR_InternalContext;
-
-/********************************* MACROS *******************************/
-#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
-
-
-/********************** M4AIR PUBLIC API IMPLEMENTATION ********************/
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
- * @author  Arnaud Collard
- * @brief       This function initialize an instance of the AIR.
- * @param   pContext:   (IN/OUT) Address of the context to create
- * @param   inputFormat:    (IN) input format type.
- * @return  M4NO_ERROR: there is no error
- * @return  M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
- * @return  M4ERR_ALLOC: No more memory is available
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
-{
-    M4OSA_ERR err = M4NO_ERROR ;
-    M4AIR_InternalContext* pC = M4OSA_NULL ;
-    /* Check that the address on the context is not NULL */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    *pContext = M4OSA_NULL ;
-
-    /* Internal Context creation */
-    pC = (M4AIR_InternalContext*)M4OSA_32bitAlignedMalloc(sizeof(M4AIR_InternalContext), M4AIR, (M4OSA_Char*)"AIR internal context") ;
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, pC) ;
-
-
-    /* Check if the input format is supported */
-    switch(inputFormat)
-    {
-#ifdef M4AIR_YUV420_FORMAT_SUPPORTED
-        case M4AIR_kYUV420P:
-        break ;
-#endif
-#ifdef M4AIR_YUV420A_FORMAT_SUPPORTED
-        case M4AIR_kYUV420AP:
-        break ;
-#endif
-        default:
-            err = M4ERR_AIR_FORMAT_NOT_SUPPORTED;
-            goto M4AIR_create_cleanup ;
-    }
-
-    /**< Save input format and update state */
-    pC->m_inputFormat = inputFormat;
-    pC->m_state = M4AIR_kCreated;
-
-    /* Return the context to the caller */
-    *pContext = pC ;
-
-    return M4NO_ERROR ;
-
-M4AIR_create_cleanup:
-    /* Error management : we destroy the context if needed */
-    if(M4OSA_NULL != pC)
-    {
-        free(pC) ;
-    }
-
-    *pContext = M4OSA_NULL ;
-
-    return err ;
-}
-
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
- * @author  Arnaud Collard
- * @brief       This function destroys an instance of the AIR component
- * @param   pContext:   (IN) Context identifying the instance to destroy
- * @return  M4NO_ERROR: there is no error
- * @return  M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- * @return  M4ERR_STATE: Internal state is incompatible with this function call.
-******************************************************************************
-*/
-M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
-{
-    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    /**< Check state */
-    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
-    {
-        return M4ERR_STATE;
-    }
-    free(pC) ;
-
-    return M4NO_ERROR ;
-
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
- * @brief       This function will configure the AIR.
- * @note    It will set the input and output coordinates and sizes,
- *          and indicates if we will proceed in stripe or not.
- *          In case a M4AIR_get in stripe mode was on going, it will cancel this previous processing
- *          and reset the get process.
- * @param   pContext:               (IN) Context identifying the instance
- * @param   pParams->m_bOutputStripe:(IN) Stripe mode.
- * @param   pParams->m_inputCoord:  (IN) X,Y coordinates of the first valid pixel in input.
- * @param   pParams->m_inputSize:   (IN) input ROI size.
- * @param   pParams->m_outputSize:  (IN) output size.
- * @return  M4NO_ERROR: there is no error
- * @return  M4ERR_ALLOC: No more memory space to add a new effect.
- * @return  M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- * @return  M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
-{
-    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
-    M4OSA_UInt32    i,u32_width_in, u32_width_out, u32_height_in, u32_height_out;
-    M4OSA_UInt32    nb_planes;
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    if(M4AIR_kYUV420AP == pC->m_inputFormat)
-    {
-        nb_planes = 4;
-    }
-    else
-    {
-        nb_planes = 3;
-    }
-
-    /**< Check state */
-    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
-    {
-        return M4ERR_STATE;
-    }
-
-    /** Save parameters */
-    pC->m_params = *pParams;
-
-    /* Check for the input&output width and height are even */
-        if( ((pC->m_params.m_inputSize.m_height)&0x1)    ||
-         ((pC->m_params.m_inputSize.m_height)&0x1))
-        {
-         return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
-        }
-
-    if( ((pC->m_params.m_inputSize.m_width)&0x1)    ||
-         ((pC->m_params.m_inputSize.m_width)&0x1))
-        {
-            return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
-        }
-    if(((pC->m_params.m_inputSize.m_width) == (pC->m_params.m_outputSize.m_width))
-        &&((pC->m_params.m_inputSize.m_height) == (pC->m_params.m_outputSize.m_height)))
-    {
-        /**< No resize in this case, we will just copy input in output */
-        pC->m_bOnlyCopy = M4OSA_TRUE;
-    }
-    else
-    {
-        pC->m_bOnlyCopy = M4OSA_FALSE;
-
-        /**< Initialize internal variables used for resize filter */
-        for(i=0;i<nb_planes;i++)
-        {
-
-            u32_width_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_width:(pC->m_params.m_inputSize.m_width+1)>>1;
-            u32_height_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_height:(pC->m_params.m_inputSize.m_height+1)>>1;
-            u32_width_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_width:(pC->m_params.m_outputSize.m_width+1)>>1;
-            u32_height_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_height:(pC->m_params.m_outputSize.m_height+1)>>1;
-
-                /* Compute horizontal ratio between src and destination width.*/
-                if (u32_width_out >= u32_width_in)
-                {
-                    pC->u32_x_inc[i]   = ((u32_width_in-1) * 0x10000) / (u32_width_out-1);
-                }
-                else
-                {
-                    pC->u32_x_inc[i]   = (u32_width_in * 0x10000) / (u32_width_out);
-                }
-
-                /* Compute vertical ratio between src and destination height.*/
-                if (u32_height_out >= u32_height_in)
-                {
-                    pC->u32_y_inc[i]   = ((u32_height_in - 1) * 0x10000) / (u32_height_out-1);
-                }
-                else
-                {
-                    pC->u32_y_inc[i] = (u32_height_in * 0x10000) / (u32_height_out);
-                }
-
-                /*
-                Calculate initial accumulator value : u32_y_accum_start.
-                u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-                */
-                if (pC->u32_y_inc[i] >= 0x10000)
-                {
-                    /*
-                        Keep the fractionnal part, assimung that integer  part is coded
-                        on the 16 high bits and the fractionnal on the 15 low bits
-                    */
-                    pC->u32_y_accum_start[i] = pC->u32_y_inc[i] & 0xffff;
-
-                    if (!pC->u32_y_accum_start[i])
-                    {
-                        pC->u32_y_accum_start[i] = 0x10000;
-                    }
-
-                    pC->u32_y_accum_start[i] >>= 1;
-                }
-                else
-                {
-                    pC->u32_y_accum_start[i] = 0;
-                }
-                /**< Take into account that Y coordinate can be odd
-                    in this case we have to put a 0.5 offset
-                    for U and V plane as there a 2 times sub-sampled vs Y*/
-                if((pC->m_params.m_inputCoord.m_y&0x1)&&((i==1)||(i==2)))
-                {
-                    pC->u32_y_accum_start[i] += 0x8000;
-                }
-
-                /*
-                    Calculate initial accumulator value : u32_x_accum_start.
-                    u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-                */
-
-                if (pC->u32_x_inc[i] >= 0x10000)
-                {
-                    pC->u32_x_accum_start[i] = pC->u32_x_inc[i] & 0xffff;
-
-                    if (!pC->u32_x_accum_start[i])
-                    {
-                        pC->u32_x_accum_start[i] = 0x10000;
-                    }
-
-                    pC->u32_x_accum_start[i] >>= 1;
-                }
-                else
-                {
-                    pC->u32_x_accum_start[i] = 0;
-                }
-                /**< Take into account that X coordinate can be odd
-                    in this case we have to put a 0.5 offset
-                    for U and V plane as there a 2 times sub-sampled vs Y*/
-                if((pC->m_params.m_inputCoord.m_x&0x1)&&((i==1)||(i==2)))
-                {
-                    pC->u32_x_accum_start[i] += 0x8000;
-                }
-        }
-    }
-
-    /**< Reset variable used for stripe mode */
-    pC->m_procRows = 0;
-
-    /**< Initialize var for X/Y processing order according to orientation */
-    pC->m_bFlipX = M4OSA_FALSE;
-    pC->m_bFlipY = M4OSA_FALSE;
-    pC->m_bRevertXY = M4OSA_FALSE;
-    switch(pParams->m_outputOrientation)
-    {
-        case M4COMMON_kOrientationTopLeft:
-            break;
-        case M4COMMON_kOrientationTopRight:
-            pC->m_bFlipX = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationBottomRight:
-            pC->m_bFlipX = M4OSA_TRUE;
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationBottomLeft:
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationLeftTop:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationRightTop:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            pC->m_bFlipY = M4OSA_TRUE;
-        break;
-        case M4COMMON_kOrientationRightBottom:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            pC->m_bFlipX = M4OSA_TRUE;
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationLeftBottom:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            pC->m_bFlipX = M4OSA_TRUE;
-            break;
-        default:
-        return M4ERR_PARAMETER;
-    }
-    /**< Update state */
-    pC->m_state = M4AIR_kConfigured;
-
-    return M4NO_ERROR ;
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
- * @brief   This function will provide the requested resized area of interest according to settings
- *          provided in M4AIR_configure.
- * @note    In case the input format type is JPEG, input plane(s)
- *          in pIn is not used. In normal mode, dimension specified in output plane(s) structure must be the
- *          same than the one specified in M4AIR_configure. In stripe mode, only the width will be the same,
- *          height will be taken as the stripe height (typically 16).
- *          In normal mode, this function is call once to get the full output picture. In stripe mode, it is called
- *          for each stripe till the whole picture has been retrieved,and  the position of the output stripe in the output picture
- *          is internally incremented at each step.
- *          Any call to M4AIR_configure during stripe process will reset this one to the beginning of the output picture.
- * @param   pContext:   (IN) Context identifying the instance
- * @param   pIn:            (IN) Plane structure containing input Plane(s).
- * @param   pOut:       (IN/OUT)  Plane structure containing output Plane(s).
- * @return  M4NO_ERROR: there is no error
- * @return  M4ERR_ALLOC: No more memory space to add a new effect.
- * @return  M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
-{
-    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
-    M4OSA_UInt32 i,j,k,u32_x_frac,u32_y_frac,u32_x_accum,u32_y_accum,u32_shift;
-        M4OSA_UInt8    *pu8_data_in, *pu8_data_in_org, *pu8_data_in_tmp, *pu8_data_out;
-        M4OSA_UInt8    *pu8_src_top;
-        M4OSA_UInt8    *pu8_src_bottom;
-    M4OSA_UInt32    u32_temp_value;
-    M4OSA_Int32 i32_tmp_offset;
-    M4OSA_UInt32    nb_planes;
-
-
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    /**< Check state */
-    if(M4AIR_kConfigured != pC->m_state)
-    {
-        return M4ERR_STATE;
-    }
-
-    if(M4AIR_kYUV420AP == pC->m_inputFormat)
-    {
-        nb_planes = 4;
-    }
-    else
-    {
-        nb_planes = 3;
-    }
-
-    /**< Loop on each Plane */
-    for(i=0;i<nb_planes;i++)
-    {
-
-         /* Set the working pointers at the beginning of the input/output data field */
-
-        u32_shift = ((i==0)||(i==3))?0:1; /**< Depend on Luma or Chroma */
-
-        if((M4OSA_FALSE == pC->m_params.m_bOutputStripe)||((M4OSA_TRUE == pC->m_params.m_bOutputStripe)&&(0 == pC->m_procRows)))
-        {
-            /**< For input, take care about ROI */
-            pu8_data_in     = pIn[i].pac_data + pIn[i].u_topleft + (pC->m_params.m_inputCoord.m_x>>u32_shift)
-                        + (pC->m_params.m_inputCoord.m_y >> u32_shift) * pIn[i].u_stride;
-
-            /** Go at end of line/column in case X/Y scanning is flipped */
-            if(M4OSA_TRUE == pC->m_bFlipX)
-            {
-                pu8_data_in += ((pC->m_params.m_inputSize.m_width)>>u32_shift) -1 ;
-            }
-            if(M4OSA_TRUE == pC->m_bFlipY)
-            {
-                pu8_data_in += ((pC->m_params.m_inputSize.m_height>>u32_shift) -1) * pIn[i].u_stride;
-            }
-
-            /**< Initialize accumulators in case we are using it (bilinear interpolation) */
-            if( M4OSA_FALSE == pC->m_bOnlyCopy)
-            {
-                pC->u32_x_accum[i] = pC->u32_x_accum_start[i];
-                pC->u32_y_accum[i] = pC->u32_y_accum_start[i];
-            }
-
-        }
-        else
-        {
-            /**< In case of stripe mode for other than first stripe, we need to recover input pointer from internal context */
-            pu8_data_in = pC->pu8_data_in[i];
-        }
-
-        /**< In every mode, output data are at the beginning of the output plane */
-        pu8_data_out    = pOut[i].pac_data + pOut[i].u_topleft;
-
-        /**< Initialize input offset applied after each pixel */
-        if(M4OSA_FALSE == pC->m_bFlipY)
-        {
-            i32_tmp_offset = pIn[i].u_stride;
-        }
-        else
-        {
-            i32_tmp_offset = -pIn[i].u_stride;
-        }
-
-        /**< In this case, no bilinear interpolation is needed as input and output dimensions are the same */
-        if( M4OSA_TRUE == pC->m_bOnlyCopy)
-        {
-            /**< No +-90° rotation */
-            if(M4OSA_FALSE == pC->m_bRevertXY)
-            {
-                /**< No flip on X abscissa */
-                if(M4OSA_FALSE == pC->m_bFlipX)
-                {
-                     M4OSA_UInt32 loc_height = pOut[i].u_height;
-                     M4OSA_UInt32 loc_width = pOut[i].u_width;
-                     M4OSA_UInt32 loc_stride = pIn[i].u_stride;
-                    /**< Loop on each row */
-                    for (j=0; j<loc_height; j++)
-                    {
-                        /**< Copy one whole line */
-                        memcpy((void *)pu8_data_out, (void *)pu8_data_in, loc_width);
-
-                        /**< Update pointers */
-                        pu8_data_out += pOut[i].u_stride;
-                        if(M4OSA_FALSE == pC->m_bFlipY)
-                        {
-                            pu8_data_in += loc_stride;
-                        }
-                        else
-                        {
-                            pu8_data_in -= loc_stride;
-                        }
-                    }
-                }
-                else
-                {
-                    /**< Loop on each row */
-                    for(j=0;j<pOut[i].u_height;j++)
-                    {
-                        /**< Loop on each pixel of 1 row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-                            *pu8_data_out++ = *pu8_data_in--;
-                        }
-
-                        /**< Update pointers */
-                        pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
-
-                        pu8_data_in += pOut[i].u_width + i32_tmp_offset;
-
-                    }
-                }
-            }
-            /**< Here we have a +-90° rotation */
-            else
-            {
-
-                /**< Loop on each row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    pu8_data_in_tmp = pu8_data_in;
-
-                    /**< Loop on each pixel of 1 row */
-                    for(k=0;k<pOut[i].u_width;k++)
-                    {
-                        *pu8_data_out++ = *pu8_data_in_tmp;
-
-                        /**< Update input pointer in order to go to next/past line */
-                        pu8_data_in_tmp += i32_tmp_offset;
-                    }
-
-                    /**< Update pointers */
-                    pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
-                    if(M4OSA_FALSE == pC->m_bFlipX)
-                    {
-                        pu8_data_in ++;
-                    }
-                    else
-                    {
-                        pu8_data_in --;
-                    }
-                }
-            }
-        }
-        /**< Bilinear interpolation */
-        else
-        {
-
-        if(3 != i)  /**< other than alpha plane */
-        {
-            /**No +-90° rotation */
-            if(M4OSA_FALSE == pC->m_bRevertXY)
-            {
-
-                /**< Loop on each row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* Vertical weight factor */
-                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
-
-                    /* Reinit horizontal weight factor */
-                    u32_x_accum = pC->u32_x_accum_start[i];
-
-
-
-                        if(M4OSA_TRUE ==  pC->m_bFlipX)
-                        {
-
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */
-
-                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-                        }
-
-                        else
-                        {
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */
-
-                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                                    *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-
-                        }
-
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update vertical accumulator */
-                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
-                    if (pC->u32_y_accum[i]>>16)
-                    {
-                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
-                        pC->u32_y_accum[i] &= 0xffff;
-                    }
-                }
-        }
-            /** +-90° rotation */
-            else
-            {
-                pu8_data_in_org = pu8_data_in;
-
-                /**< Loop on each output row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* horizontal weight factor */
-                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
-
-                    /* Reinit accumulator */
-                    u32_y_accum = pC->u32_y_accum_start[i];
-
-                    if(M4OSA_TRUE ==  pC->m_bFlipX)
-                    {
-
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-
-                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                            if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                u32_y_accum &= 0xffff;
-                            }
-
-                        }
-                    }
-                    else
-                    {
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                            if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                u32_y_accum &= 0xffff;
-                            }
-                        }
-                    }
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update horizontal accumulator */
-                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
-
-                    pu8_data_in = pu8_data_in_org;
-                }
-
-            }
-            }/** 3 != i */
-            else
-            {
-            /**No +-90° rotation */
-            if(M4OSA_FALSE == pC->m_bRevertXY)
-            {
-
-                /**< Loop on each row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* Vertical weight factor */
-                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
-
-                    /* Reinit horizontal weight factor */
-                    u32_x_accum = pC->u32_x_accum_start[i];
-
-
-
-                        if(M4OSA_TRUE ==  pC->m_bFlipX)
-                        {
-
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */
-
-                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                                u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-                        }
-
-                        else
-                        {
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */
-
-                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                                u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-
-                        }
-
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update vertical accumulator */
-                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
-                    if (pC->u32_y_accum[i]>>16)
-                    {
-                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
-                        pC->u32_y_accum[i] &= 0xffff;
-                    }
-                }
-
-            } /**< M4OSA_FALSE == pC->m_bRevertXY */
-            /** +-90° rotation */
-            else
-            {
-                pu8_data_in_org = pu8_data_in;
-
-                /**< Loop on each output row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* horizontal weight factor */
-                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
-
-                    /* Reinit accumulator */
-                    u32_y_accum = pC->u32_y_accum_start[i];
-
-                    if(M4OSA_TRUE ==  pC->m_bFlipX)
-                    {
-
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-
-                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                            u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                            if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                u32_y_accum &= 0xffff;
-                            }
-
-                        }
-                    }
-                    else
-                    {
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                            u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                            if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                u32_y_accum &= 0xffff;
-                            }
-                        }
-                    }
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update horizontal accumulator */
-                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
-
-                    pu8_data_in = pu8_data_in_org;
-
-                }
-                } /**< M4OSA_TRUE == pC->m_bRevertXY */
-        }/** 3 == i */
-            }
-        /**< In case of stripe mode, save current input pointer */
-        if(M4OSA_TRUE == pC->m_params.m_bOutputStripe)
-        {
-            pC->pu8_data_in[i] = pu8_data_in;
-        }
-    }
-
-    /**< Update number of processed rows, reset it if we have finished with the whole processing */
-    pC->m_procRows += pOut[0].u_height;
-    if(M4OSA_FALSE == pC->m_bRevertXY)
-    {
-        if(pC->m_params.m_outputSize.m_height <= pC->m_procRows)    pC->m_procRows = 0;
-    }
-    else
-    {
-        if(pC->m_params.m_outputSize.m_width <= pC->m_procRows) pC->m_procRows = 0;
-    }
-
-    return M4NO_ERROR ;
-
-}
-/*+ Handle the image files here */
-
-/**
- ******************************************************************************
- * M4OSA_ERR LvGetImageThumbNail(M4OSA_UChar *fileName, M4OSA_Void **pBuffer)
- * @brief   This function gives YUV420 buffer of a given image file (in argb888 format)
- * @Note: The caller of the function is responsible to free the yuv buffer allocated
- * @param   fileName:       (IN) Path to the filename of the image argb data
- * @param   height:     (IN) Height of the image
- * @param     width:             (OUT) pBuffer pointer to the address where the yuv data address needs to be returned.
- * @return  M4NO_ERROR: there is no error
- * @return  M4ERR_ALLOC: No more memory space to add a new effect.
- * @return  M4ERR_FILE_NOT_FOUND: if the file passed does not exists.
- ******************************************************************************
-*/
-M4OSA_ERR LvGetImageThumbNail(const char *fileName, M4OSA_UInt32 height, M4OSA_UInt32 width, M4OSA_Void **pBuffer) {
-
-    M4VIFI_ImagePlane rgbPlane, *yuvPlane;
-    M4OSA_UInt32 frameSize_argb = (width * height * 4); // argb data
-    M4OSA_Context lImageFileFp  = M4OSA_NULL;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb, M4VS, (M4OSA_Char*)"Image argb data");
-    if(pTmpData == M4OSA_NULL) {
-        ALOGE("Failed to allocate memory for Image clip");
-        return M4ERR_ALLOC;
-    }
-
-       /** Read the argb data from the passed file. */
-    M4OSA_ERR lerr = M4OSA_fileReadOpen(&lImageFileFp, (M4OSA_Void *) fileName, M4OSA_kFileRead);
-
-    if((lerr != M4NO_ERROR) || (lImageFileFp == M4OSA_NULL))
-    {
-        ALOGE("LVPreviewController: Can not open the file ");
-        free(pTmpData);
-        return M4ERR_FILE_NOT_FOUND;
-    }
-    lerr = M4OSA_fileReadData(lImageFileFp, (M4OSA_MemAddr8)pTmpData, &frameSize_argb);
-    if(lerr != M4NO_ERROR)
-    {
-        ALOGE("LVPreviewController: can not read the data ");
-        M4OSA_fileReadClose(lImageFileFp);
-        free(pTmpData);
-        return lerr;
-    }
-    M4OSA_fileReadClose(lImageFileFp);
-
-    M4OSA_UInt32 frameSize = (width * height * 3); //Size of YUV420 data.
-    rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS, (M4OSA_Char*)"Image clip RGB888 data");
-    if(rgbPlane.pac_data == M4OSA_NULL)
-    {
-        ALOGE("Failed to allocate memory for Image clip");
-        free(pTmpData);
-        return M4ERR_ALLOC;
-    }
-
-    /** Remove the alpha channel */
-    for (M4OSA_UInt32 i=0, j = 0; i < frameSize_argb; i++) {
-        if ((i % 4) == 0) continue;
-        rgbPlane.pac_data[j] = pTmpData[i];
-        j++;
-    }
-    free(pTmpData);
-
-#ifdef FILE_DUMP
-    FILE *fp = fopen("/sdcard/Input/test_rgb.raw", "wb");
-    if(fp == NULL)
-        ALOGE("Errors file can not be created");
-    else {
-        fwrite(rgbPlane.pac_data, frameSize, 1, fp);
-        fclose(fp);
-    }
-#endif
-        rgbPlane.u_height = height;
-        rgbPlane.u_width = width;
-        rgbPlane.u_stride = width*3;
-        rgbPlane.u_topleft = 0;
-
-        yuvPlane = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane),
-                M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
-        yuvPlane[0].u_height = height;
-        yuvPlane[0].u_width = width;
-        yuvPlane[0].u_stride = width;
-        yuvPlane[0].u_topleft = 0;
-        yuvPlane[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(yuvPlane[0].u_height * yuvPlane[0].u_width * 1.5, M4VS, (M4OSA_Char*)"imageClip YUV data");
-
-        yuvPlane[1].u_height = yuvPlane[0].u_height >>1;
-        yuvPlane[1].u_width = yuvPlane[0].u_width >> 1;
-        yuvPlane[1].u_stride = yuvPlane[1].u_width;
-        yuvPlane[1].u_topleft = 0;
-        yuvPlane[1].pac_data = (M4VIFI_UInt8*)(yuvPlane[0].pac_data + yuvPlane[0].u_height * yuvPlane[0].u_width);
-
-        yuvPlane[2].u_height = yuvPlane[0].u_height >>1;
-        yuvPlane[2].u_width = yuvPlane[0].u_width >> 1;
-        yuvPlane[2].u_stride = yuvPlane[2].u_width;
-        yuvPlane[2].u_topleft = 0;
-        yuvPlane[2].pac_data = (M4VIFI_UInt8*)(yuvPlane[1].pac_data + yuvPlane[1].u_height * yuvPlane[1].u_width);
-
-
-        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane, yuvPlane);
-        //err = M4VIFI_BGR888toYUV420(M4OSA_NULL, &rgbPlane, yuvPlane);
-        if(err != M4NO_ERROR)
-        {
-            ALOGE("error when converting from RGB to YUV: 0x%x\n", (unsigned int)err);
-        }
-        free(rgbPlane.pac_data);
-
-        //ALOGE("RGB to YUV done");
-#ifdef FILE_DUMP
-        FILE *fp1 = fopen("/sdcard/Input/test_yuv.raw", "wb");
-        if(fp1 == NULL)
-            ALOGE("Errors file can not be created");
-        else {
-            fwrite(yuvPlane[0].pac_data, yuvPlane[0].u_height * yuvPlane[0].u_width * 1.5, 1, fp1);
-            fclose(fp1);
-        }
-#endif
-        *pBuffer = yuvPlane[0].pac_data;
-        free(yuvPlane);
-        return M4NO_ERROR;
-
-}
-M4OSA_Void prepareYUV420ImagePlane(M4VIFI_ImagePlane *plane,
-    M4OSA_UInt32 width, M4OSA_UInt32 height, M4VIFI_UInt8 *buffer,
-    M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight) {
-
-    //Y plane
-    plane[0].u_width = width;
-    plane[0].u_height = height;
-    plane[0].u_stride = reportedWidth;
-    plane[0].u_topleft = 0;
-    plane[0].pac_data = buffer;
-
-    // U plane
-    plane[1].u_width = width/2;
-    plane[1].u_height = height/2;
-    plane[1].u_stride = reportedWidth >> 1;
-    plane[1].u_topleft = 0;
-    plane[1].pac_data = buffer+(reportedWidth*reportedHeight);
-
-    // V Plane
-    plane[2].u_width = width/2;
-    plane[2].u_height = height/2;
-    plane[2].u_stride = reportedWidth >> 1;
-    plane[2].u_topleft = 0;
-    plane[2].pac_data = plane[1].pac_data + ((reportedWidth/2)*(reportedHeight/2));
-}
-
-M4OSA_Void prepareYV12ImagePlane(M4VIFI_ImagePlane *plane,
-    M4OSA_UInt32 width, M4OSA_UInt32 height, M4OSA_UInt32 stride,
-    M4VIFI_UInt8 *buffer) {
-
-    //Y plane
-    plane[0].u_width = width;
-    plane[0].u_height = height;
-    plane[0].u_stride = stride;
-    plane[0].u_topleft = 0;
-    plane[0].pac_data = buffer;
-
-    // U plane
-    plane[1].u_width = width/2;
-    plane[1].u_height = height/2;
-    plane[1].u_stride = android::PreviewRenderer::ALIGN(plane[0].u_stride/2, 16);
-    plane[1].u_topleft = 0;
-    plane[1].pac_data = (buffer
-                + plane[0].u_height * plane[0].u_stride
-                + (plane[0].u_height/2) * android::PreviewRenderer::ALIGN((
-                 plane[0].u_stride / 2), 16));
-
-    // V Plane
-    plane[2].u_width = width/2;
-    plane[2].u_height = height/2;
-    plane[2].u_stride = android::PreviewRenderer::ALIGN(plane[0].u_stride/2, 16);
-    plane[2].u_topleft = 0;
-    plane[2].pac_data = (buffer +
-     plane[0].u_height * android::PreviewRenderer::ALIGN(plane[0].u_stride, 16));
-
-
-}
-
-M4OSA_Void swapImagePlanes(
-    M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
-    M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2) {
-
-    planeIn[0].u_height = planeOut[0].u_height;
-    planeIn[0].u_width = planeOut[0].u_width;
-    planeIn[0].u_stride = planeOut[0].u_stride;
-    planeIn[0].u_topleft = planeOut[0].u_topleft;
-    planeIn[0].pac_data = planeOut[0].pac_data;
-
-    /**
-     * U plane */
-    planeIn[1].u_width = planeOut[1].u_width;
-    planeIn[1].u_height = planeOut[1].u_height;
-    planeIn[1].u_stride = planeOut[1].u_stride;
-    planeIn[1].u_topleft = planeOut[1].u_topleft;
-    planeIn[1].pac_data = planeOut[1].pac_data;
-    /**
-     * V Plane */
-    planeIn[2].u_width = planeOut[2].u_width;
-    planeIn[2].u_height = planeOut[2].u_height;
-    planeIn[2].u_stride = planeOut[2].u_stride;
-    planeIn[2].u_topleft = planeOut[2].u_topleft;
-    planeIn[2].pac_data = planeOut[2].pac_data;
-
-    if(planeOut[0].pac_data == (M4VIFI_UInt8*)buffer1)
-    {
-        planeOut[0].pac_data = (M4VIFI_UInt8*)buffer2;
-        planeOut[1].pac_data = (M4VIFI_UInt8*)(buffer2 +
-         planeOut[0].u_width*planeOut[0].u_height);
-
-        planeOut[2].pac_data = (M4VIFI_UInt8*)(buffer2 +
-         planeOut[0].u_width*planeOut[0].u_height +
-         planeOut[1].u_width*planeOut[1].u_height);
-    }
-    else
-    {
-        planeOut[0].pac_data = (M4VIFI_UInt8*)buffer1;
-        planeOut[1].pac_data = (M4VIFI_UInt8*)(buffer1 +
-         planeOut[0].u_width*planeOut[0].u_height);
-
-        planeOut[2].pac_data = (M4VIFI_UInt8*)(buffer1 +
-         planeOut[0].u_width*planeOut[0].u_height +
-         planeOut[1].u_width*planeOut[1].u_height);
-    }
-
-}
-
-M4OSA_Void computePercentageDone(
-    M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
-    M4OSA_UInt32 effectDuration, M4OSA_Double *percentageDone) {
-
-    M4OSA_Double videoEffectTime =0;
-
-    // Compute how far from the beginning of the effect we are, in clip-base time.
-    videoEffectTime =
-     (M4OSA_Int32)(ctsMs+ 0.5) - effectStartTimeMs;
-
-    // To calculate %, substract timeIncrement
-    // because effect should finish on the last frame
-    // which is from CTS = (eof-timeIncrement) till CTS = eof
-    *percentageDone =
-     videoEffectTime / ((M4OSA_Float)effectDuration);
-
-    if(*percentageDone < 0.0) *percentageDone = 0.0;
-    if(*percentageDone > 1.0) *percentageDone = 1.0;
-
-}
-
-
-M4OSA_Void computeProgressForVideoEffect(
-    M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
-    M4OSA_UInt32 effectDuration, M4VSS3GPP_ExternalProgress* extProgress) {
-
-    M4OSA_Double percentageDone =0;
-
-    computePercentageDone(ctsMs, effectStartTimeMs, effectDuration, &percentageDone);
-
-    extProgress->uiProgress = (M4OSA_UInt32)( percentageDone * 1000 );
-    extProgress->uiOutputTime = (M4OSA_UInt32)(ctsMs + 0.5);
-    extProgress->uiClipTime = extProgress->uiOutputTime;
-    extProgress->bIsLast = M4OSA_FALSE;
-}
-
-M4OSA_ERR prepareFramingStructure(
-    M4xVSS_FramingStruct* framingCtx,
-    M4VSS3GPP_EffectSettings* effectsSettings, M4OSA_UInt32 index,
-    M4VIFI_UInt8* overlayRGB, M4VIFI_UInt8* overlayYUV) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    // Force input RGB buffer to even size to avoid errors in YUV conversion
-    framingCtx->FramingRgb = effectsSettings[index].xVSS.pFramingBuffer;
-    framingCtx->FramingRgb->u_width = framingCtx->FramingRgb->u_width & ~1;
-    framingCtx->FramingRgb->u_height = framingCtx->FramingRgb->u_height & ~1;
-    framingCtx->FramingYuv = NULL;
-
-    framingCtx->duration = effectsSettings[index].uiDuration;
-    framingCtx->topleft_x = effectsSettings[index].xVSS.topleft_x;
-    framingCtx->topleft_y = effectsSettings[index].xVSS.topleft_y;
-    framingCtx->pCurrent = framingCtx;
-    framingCtx->pNext = framingCtx;
-    framingCtx->previousClipTime = -1;
-
-    framingCtx->alphaBlendingStruct =
-     (M4xVSS_internalEffectsAlphaBlending*)M4OSA_32bitAlignedMalloc(
-      sizeof(M4xVSS_internalEffectsAlphaBlending), M4VS,
-      (M4OSA_Char*)"alpha blending struct");
-
-    framingCtx->alphaBlendingStruct->m_fadeInTime =
-     effectsSettings[index].xVSS.uialphaBlendingFadeInTime;
-
-    framingCtx->alphaBlendingStruct->m_fadeOutTime =
-     effectsSettings[index].xVSS.uialphaBlendingFadeOutTime;
-
-    framingCtx->alphaBlendingStruct->m_end =
-     effectsSettings[index].xVSS.uialphaBlendingEnd;
-
-    framingCtx->alphaBlendingStruct->m_middle =
-     effectsSettings[index].xVSS.uialphaBlendingMiddle;
-
-    framingCtx->alphaBlendingStruct->m_start =
-     effectsSettings[index].xVSS.uialphaBlendingStart;
-
-    // If new Overlay buffer, convert from RGB to YUV
-    if((overlayRGB != framingCtx->FramingRgb->pac_data) || (overlayYUV == NULL) ) {
-
-        // If YUV buffer exists, delete it
-        if(overlayYUV != NULL) {
-           free(overlayYUV);
-           overlayYUV = NULL;
-        }
-    if(effectsSettings[index].xVSS.rgbType == M4VSS3GPP_kRGB565) {
-        // Input RGB565 plane is provided,
-        // let's convert it to YUV420, and update framing structure
-        err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
-    }
-    else if(effectsSettings[index].xVSS.rgbType == M4VSS3GPP_kRGB888) {
-        // Input RGB888 plane is provided,
-        // let's convert it to YUV420, and update framing structure
-        err = M4xVSS_internalConvertRGB888toYUV(framingCtx);
-    }
-    else {
-        err = M4ERR_PARAMETER;
-    }
-        overlayYUV = framingCtx->FramingYuv[0].pac_data;
-        overlayRGB = framingCtx->FramingRgb->pac_data;
-
-    }
-    else {
-        ALOGV(" YUV buffer reuse");
-        framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
-            3*sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char*)"YUV");
-
-        if(framingCtx->FramingYuv == M4OSA_NULL) {
-            return M4ERR_ALLOC;
-        }
-
-        framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
-        framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
-        framingCtx->FramingYuv[0].u_topleft = 0;
-        framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
-        framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)overlayYUV;
-
-        framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
-        framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
-        framingCtx->FramingYuv[1].u_topleft = 0;
-        framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-        framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data +
-            framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
-
-        framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
-        framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
-        framingCtx->FramingYuv[2].u_topleft = 0;
-        framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-        framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data +
-            framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
-
-        framingCtx->duration = 0;
-        framingCtx->previousClipTime = -1;
-        framingCtx->previewOffsetClipTime = -1;
-
-    }
-    return err;
-}
-
-M4OSA_ERR applyColorEffect(M4xVSS_VideoEffectType colorEffect,
-    M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
-    M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_UInt16 rgbColorData) {
-
-    M4xVSS_ColorStruct colorContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    colorContext.colorEffectType = colorEffect;
-    colorContext.rgb16ColorData = rgbColorData;
-
-    err = M4VSS3GPP_externalVideoEffectColor(
-     (M4OSA_Void *)&colorContext, planeIn, planeOut, NULL,
-     colorEffect);
-
-    if(err != M4NO_ERROR) {
-        ALOGV("M4VSS3GPP_externalVideoEffectColor(%d) error %d",
-            colorEffect, err);
-
-        if(NULL != buffer1) {
-            free(buffer1);
-            buffer1 = NULL;
-        }
-        if(NULL != buffer2) {
-            free(buffer2);
-            buffer2 = NULL;
-        }
-        return err;
-    }
-
-    // The out plane now becomes the in plane for adding other effects
-    swapImagePlanes(planeIn, planeOut, buffer1, buffer2);
-
-    return err;
-}
-
-M4OSA_ERR applyLumaEffect(M4VSS3GPP_VideoEffectType videoEffect,
-    M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
-    M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_Int32 lum_factor) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    err = M4VFL_modifyLumaWithScale(
-         (M4ViComImagePlane*)planeIn,(M4ViComImagePlane*)planeOut,
-         lum_factor, NULL);
-
-    if(err != M4NO_ERROR) {
-        ALOGE("M4VFL_modifyLumaWithScale(%d) error %d", videoEffect, (int)err);
-
-        if(NULL != buffer1) {
-            free(buffer1);
-            buffer1= NULL;
-        }
-        if(NULL != buffer2) {
-            free(buffer2);
-            buffer2= NULL;
-        }
-        return err;
-    }
-
-    // The out plane now becomes the in plane for adding other effects
-    swapImagePlanes(planeIn, planeOut,(M4VIFI_UInt8 *)buffer1,
-     (M4VIFI_UInt8 *)buffer2);
-
-    return err;
-}
-
-M4OSA_ERR applyEffectsAndRenderingMode(vePostProcessParams *params,
-    M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4VIFI_ImagePlane planeIn[3], planeOut[3];
-    M4VIFI_UInt8 *finalOutputBuffer = NULL, *tempOutputBuffer= NULL;
-    M4OSA_Double percentageDone =0;
-    M4OSA_Int32 lum_factor;
-    M4VSS3GPP_ExternalProgress extProgress;
-    M4xVSS_FiftiesStruct fiftiesCtx;
-    M4OSA_UInt32 frameSize = 0, i=0;
-
-    frameSize = (params->videoWidth*params->videoHeight*3) >> 1;
-
-    finalOutputBuffer = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS,
-     (M4OSA_Char*)("lvpp finalOutputBuffer"));
-
-    if(finalOutputBuffer == NULL) {
-        ALOGE("applyEffectsAndRenderingMode: malloc error");
-        return M4ERR_ALLOC;
-    }
-
-    // allocate the tempOutputBuffer
-    tempOutputBuffer = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
-     ((params->videoHeight*params->videoWidth*3)>>1), M4VS, (M4OSA_Char*)("lvpp colorBuffer"));
-
-    if(tempOutputBuffer == NULL) {
-        ALOGE("applyEffectsAndRenderingMode: malloc error tempOutputBuffer");
-        if(NULL != finalOutputBuffer) {
-            free(finalOutputBuffer);
-            finalOutputBuffer = NULL;
-        }
-        return M4ERR_ALLOC;
-    }
-
-    // Initialize the In plane
-    prepareYUV420ImagePlane(planeIn, params->videoWidth, params->videoHeight,
-       params->vidBuffer, reportedWidth, reportedHeight);
-
-    // Initialize the Out plane
-    prepareYUV420ImagePlane(planeOut, params->videoWidth, params->videoHeight,
-       (M4VIFI_UInt8 *)tempOutputBuffer, params->videoWidth, params->videoHeight);
-
-    // The planeIn contains the YUV420 input data to postprocessing node
-    // and planeOut will contain the YUV420 data with effect
-    // In each successive if condition, apply filter to successive
-    // output YUV frame so that concurrent effects are both applied
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_BLACKANDWHITE) {
-        err = applyColorEffect(M4xVSS_kVideoEffectType_BlackAndWhite,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer, 0);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_PINK) {
-        err = applyColorEffect(M4xVSS_kVideoEffectType_Pink,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer, 0);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_GREEN) {
-        err = applyColorEffect(M4xVSS_kVideoEffectType_Green,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer, 0);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_SEPIA) {
-        err = applyColorEffect(M4xVSS_kVideoEffectType_Sepia,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer, 0);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_NEGATIVE) {
-        err = applyColorEffect(M4xVSS_kVideoEffectType_Negative,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer, 0);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_GRADIENT) {
-        // find the effect in effectSettings array
-        for(i=0;i<params->numberEffects;i++) {
-            if(params->effectsSettings[i].VideoEffectType ==
-             (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Gradient)
-                break;
-        }
-        err = applyColorEffect(M4xVSS_kVideoEffectType_Gradient,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer,
-              params->effectsSettings[i].xVSS.uiRgb16InputColor);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_COLOR_RGB16) {
-        // Find the effect in effectSettings array
-        for(i=0;i<params->numberEffects;i++) {
-            if(params->effectsSettings[i].VideoEffectType ==
-             (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_ColorRGB16)
-                break;
-        }
-        err = applyColorEffect(M4xVSS_kVideoEffectType_ColorRGB16,
-              planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-              (M4VIFI_UInt8 *)tempOutputBuffer,
-              params->effectsSettings[i].xVSS.uiRgb16InputColor);
-        if(err != M4NO_ERROR) {
-            return err;
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_FIFTIES) {
-        // Find the effect in effectSettings array
-        for(i=0;i<params->numberEffects;i++) {
-            if(params->effectsSettings[i].VideoEffectType ==
-             (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Fifties)
-                break;
-        }
-        if(i < params->numberEffects) {
-            computeProgressForVideoEffect(params->timeMs,
-             params->effectsSettings[i].uiStartTime,
-             params->effectsSettings[i].uiDuration, &extProgress);
-
-            if(params->isFiftiesEffectStarted) {
-                fiftiesCtx.previousClipTime = -1;
-            }
-            fiftiesCtx.fiftiesEffectDuration =
-             1000/params->effectsSettings[i].xVSS.uiFiftiesOutFrameRate;
-
-            fiftiesCtx.shiftRandomValue = 0;
-            fiftiesCtx.stripeRandomValue = 0;
-
-            err = M4VSS3GPP_externalVideoEffectFifties(
-             (M4OSA_Void *)&fiftiesCtx, planeIn, planeOut, &extProgress,
-             M4xVSS_kVideoEffectType_Fifties);
-
-            if(err != M4NO_ERROR) {
-                ALOGE("M4VSS3GPP_externalVideoEffectFifties error 0x%x", (unsigned int)err);
-
-                if(NULL != finalOutputBuffer) {
-                    free(finalOutputBuffer);
-                    finalOutputBuffer = NULL;
-                }
-                if(NULL != tempOutputBuffer) {
-                    free(tempOutputBuffer);
-                    tempOutputBuffer = NULL;
-                }
-                return err;
-            }
-
-            // The out plane now becomes the in plane for adding other effects
-            swapImagePlanes(planeIn, planeOut,(M4VIFI_UInt8 *)finalOutputBuffer,
-             (M4VIFI_UInt8 *)tempOutputBuffer);
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_FRAMING) {
-
-        M4xVSS_FramingStruct framingCtx;
-        // Find the effect in effectSettings array
-        for(i=0;i<params->numberEffects;i++) {
-            if(params->effectsSettings[i].VideoEffectType ==
-             (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
-                if((params->effectsSettings[i].uiStartTime <= params->timeMs + params->timeOffset) &&
-                   ((params->effectsSettings[i].uiStartTime+
-                     params->effectsSettings[i].uiDuration) >= params->timeMs + params->timeOffset))
-                {
-                        break;
-                }
-            }
-        }
-        if(i < params->numberEffects) {
-            computeProgressForVideoEffect(params->timeMs,
-             params->effectsSettings[i].uiStartTime,
-             params->effectsSettings[i].uiDuration, &extProgress);
-
-            err = prepareFramingStructure(&framingCtx,
-                  params->effectsSettings, i, params->overlayFrameRGBBuffer,
-                  params->overlayFrameYUVBuffer);
-
-            if(err == M4NO_ERROR) {
-                err = M4VSS3GPP_externalVideoEffectFraming(
-                      (M4OSA_Void *)&framingCtx, planeIn, planeOut, &extProgress,
-                      M4xVSS_kVideoEffectType_Framing);
-            }
-
-            free(framingCtx.alphaBlendingStruct);
-
-            if(framingCtx.FramingYuv != NULL) {
-                free(framingCtx.FramingYuv);
-                framingCtx.FramingYuv = NULL;
-            }
-            //If prepareFramingStructure / M4VSS3GPP_externalVideoEffectFraming
-            // returned error, then return from function
-            if(err != M4NO_ERROR) {
-
-                if(NULL != finalOutputBuffer) {
-                    free(finalOutputBuffer);
-                    finalOutputBuffer = NULL;
-                }
-                if(NULL != tempOutputBuffer) {
-                    free(tempOutputBuffer);
-                    tempOutputBuffer = NULL;
-                }
-                return err;
-            }
-
-            // The out plane now becomes the in plane for adding other effects
-            swapImagePlanes(planeIn, planeOut,(M4VIFI_UInt8 *)finalOutputBuffer,
-             (M4VIFI_UInt8 *)tempOutputBuffer);
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_FADEFROMBLACK) {
-        /* find the effect in effectSettings array*/
-        for(i=0;i<params->numberEffects;i++) {
-            if(params->effectsSettings[i].VideoEffectType ==
-             M4VSS3GPP_kVideoEffectType_FadeFromBlack)
-                break;
-        }
-
-        if(i < params->numberEffects) {
-            computePercentageDone(params->timeMs,
-             params->effectsSettings[i].uiStartTime,
-             params->effectsSettings[i].uiDuration, &percentageDone);
-
-            // Compute where we are in the effect (scale is 0->1024)
-            lum_factor = (M4OSA_Int32)( percentageDone * 1024 );
-            // Apply the darkening effect
-            err = applyLumaEffect(M4VSS3GPP_kVideoEffectType_FadeFromBlack,
-                  planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-                  (M4VIFI_UInt8 *)tempOutputBuffer, lum_factor);
-            if(err != M4NO_ERROR) {
-                return err;
-            }
-        }
-    }
-
-    if(params->currentVideoEffect & VIDEO_EFFECT_FADETOBLACK) {
-        // Find the effect in effectSettings array
-        for(i=0;i<params->numberEffects;i++) {
-            if(params->effectsSettings[i].VideoEffectType ==
-             M4VSS3GPP_kVideoEffectType_FadeToBlack)
-                break;
-        }
-        if(i < params->numberEffects) {
-            computePercentageDone(params->timeMs,
-             params->effectsSettings[i].uiStartTime,
-             params->effectsSettings[i].uiDuration, &percentageDone);
-
-            // Compute where we are in the effect (scale is 0->1024)
-            lum_factor = (M4OSA_Int32)( (1.0-percentageDone) * 1024 );
-            // Apply the darkening effect
-            err = applyLumaEffect(M4VSS3GPP_kVideoEffectType_FadeToBlack,
-                  planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
-                  (M4VIFI_UInt8 *)tempOutputBuffer, lum_factor);
-            if(err != M4NO_ERROR) {
-                return err;
-            }
-        }
-    }
-
-    ALOGV("doMediaRendering CALL getBuffer()");
-    // Set the output YUV420 plane to be compatible with YV12 format
-    // W & H even
-    // YVU instead of YUV
-    // align buffers on 32 bits
-
-    // Y plane
-    //in YV12 format, sizes must be even
-    M4OSA_UInt32 yv12PlaneWidth = ((params->outVideoWidth +1)>>1)<<1;
-    M4OSA_UInt32 yv12PlaneHeight = ((params->outVideoHeight+1)>>1)<<1;
-
-    prepareYV12ImagePlane(planeOut, yv12PlaneWidth, yv12PlaneHeight,
-     (M4OSA_UInt32)params->outBufferStride, (M4VIFI_UInt8 *)params->pOutBuffer);
-
-    err = applyRenderingMode(planeIn, planeOut, params->renderingMode);
-
-    if(M4OSA_NULL != finalOutputBuffer) {
-        free(finalOutputBuffer);
-        finalOutputBuffer= M4OSA_NULL;
-    }
-    if(M4OSA_NULL != tempOutputBuffer) {
-        free(tempOutputBuffer);
-        tempOutputBuffer = M4OSA_NULL;
-    }
-    if(err != M4NO_ERROR) {
-        ALOGV("doVideoPostProcessing: applyRenderingMode returned err=%d",err);
-        return err;
-    }
-    return M4NO_ERROR;
-}
-
-android::status_t getVideoSizeByResolution(
-                      M4VIDEOEDITING_VideoFrameSize resolution,
-                      uint32_t *pWidth, uint32_t *pHeight) {
-
-    uint32_t frameWidth, frameHeight;
-
-    if (pWidth == NULL) {
-        ALOGE("getVideoFrameSizeByResolution invalid pointer for pWidth");
-        return android::BAD_VALUE;
-    }
-    if (pHeight == NULL) {
-        ALOGE("getVideoFrameSizeByResolution invalid pointer for pHeight");
-        return android::BAD_VALUE;
-    }
-
-    switch (resolution) {
-        case M4VIDEOEDITING_kSQCIF:
-            frameWidth = 128;
-            frameHeight = 96;
-            break;
-
-        case M4VIDEOEDITING_kQQVGA:
-            frameWidth = 160;
-            frameHeight = 120;
-            break;
-
-        case M4VIDEOEDITING_kQCIF:
-            frameWidth = 176;
-            frameHeight = 144;
-            break;
-
-        case M4VIDEOEDITING_kQVGA:
-            frameWidth = 320;
-            frameHeight = 240;
-            break;
-
-        case M4VIDEOEDITING_kCIF:
-            frameWidth = 352;
-            frameHeight = 288;
-            break;
-
-        case M4VIDEOEDITING_kVGA:
-            frameWidth = 640;
-            frameHeight = 480;
-            break;
-
-        case M4VIDEOEDITING_kWVGA:
-            frameWidth = 800;
-            frameHeight = 480;
-            break;
-
-        case M4VIDEOEDITING_kNTSC:
-            frameWidth = 720;
-            frameHeight = 480;
-            break;
-
-        case M4VIDEOEDITING_k640_360:
-            frameWidth = 640;
-            frameHeight = 360;
-            break;
-
-        case M4VIDEOEDITING_k854_480:
-            frameWidth = 854;
-            frameHeight = 480;
-            break;
-
-        case M4VIDEOEDITING_k1280_720:
-            frameWidth = 1280;
-            frameHeight = 720;
-            break;
-
-        case M4VIDEOEDITING_k1080_720:
-            frameWidth = 1080;
-            frameHeight = 720;
-            break;
-
-        case M4VIDEOEDITING_k960_720:
-            frameWidth = 960;
-            frameHeight = 720;
-            break;
-
-        case M4VIDEOEDITING_k1920_1080:
-            frameWidth = 1920;
-            frameHeight = 1080;
-            break;
-
-        default:
-            ALOGE("Unsupported video resolution %d.", resolution);
-            return android::BAD_VALUE;
-    }
-
-    *pWidth = frameWidth;
-    *pHeight = frameHeight;
-
-    return android::OK;
-}
-
-M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void* pUserData,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {
-
-    M4VIFI_Int32 plane_number;
-    M4VIFI_UInt32 i,j, u_stride;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
-
-    /**< Loop on Y,U and V planes */
-    for (plane_number = 0; plane_number < 3; plane_number++) {
-        /**< Get adresses of first valid pixel in input and output buffer */
-        /**< As we have a -90° rotation, first needed pixel is the upper-right one */
-        p_buf_src =
-            &(pPlaneIn[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]) +
-             pPlaneOut[plane_number].u_height - 1 ;
-        p_buf_dest =
-            &(pPlaneOut[plane_number].pac_data[pPlaneOut[plane_number].u_topleft]);
-        u_stride = pPlaneIn[plane_number].u_stride;
-        /**< Loop on output rows */
-        for (i = 0; i < pPlaneOut[plane_number].u_height; i++) {
-            /**< Loop on all output pixels in a row */
-            for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
-                *p_buf_dest++= *p_buf_src;
-                p_buf_src += u_stride;  /**< Go to the next row */
-            }
-
-            /**< Go on next row of the output frame */
-            p_buf_dest +=
-                pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
-            /**< Go to next pixel in the last row of the input frame*/
-            p_buf_src -=
-                pPlaneIn[plane_number].u_stride * pPlaneOut[plane_number].u_width + 1 ;
-        }
-    }
-
-    return M4VIFI_OK;
-}
-
-M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void* pUserData,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {
-
-    M4VIFI_Int32 plane_number;
-    M4VIFI_UInt32 i,j, u_stride;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
-
-    /**< Loop on Y,U and V planes */
-    for (plane_number = 0; plane_number < 3; plane_number++) {
-        /**< Get adresses of first valid pixel in input and output buffer */
-        /**< As we have a +90° rotation, first needed pixel is the left-down one */
-        p_buf_src =
-            &(pPlaneIn[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]) +
-             (pPlaneIn[plane_number].u_stride * (pPlaneOut[plane_number].u_width - 1));
-        p_buf_dest =
-            &(pPlaneOut[plane_number].pac_data[pPlaneOut[plane_number].u_topleft]);
-        u_stride = pPlaneIn[plane_number].u_stride;
-        /**< Loop on output rows */
-        for (i = 0; i < pPlaneOut[plane_number].u_height; i++) {
-            /**< Loop on all output pixels in a row */
-            for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
-                *p_buf_dest++= *p_buf_src;
-                p_buf_src -= u_stride;  /**< Go to the previous row */
-            }
-
-            /**< Go on next row of the output frame */
-            p_buf_dest +=
-                pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
-            /**< Go to next pixel in the last row of the input frame*/
-            p_buf_src +=
-                pPlaneIn[plane_number].u_stride * pPlaneOut[plane_number].u_width +1 ;
-        }
-    }
-
-    return M4VIFI_OK;
-}
-
-M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void* pUserData,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {
-    M4VIFI_Int32 plane_number;
-    M4VIFI_UInt32 i,j;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest, temp_pix1;
-
-    /**< Loop on Y,U and V planes */
-    for (plane_number = 0; plane_number < 3; plane_number++) {
-        /**< Get adresses of first valid pixel in input and output buffer */
-        p_buf_src =
-            &(pPlaneIn[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]);
-        p_buf_dest =
-            &(pPlaneOut[plane_number].pac_data[pPlaneOut[plane_number].u_topleft]);
-
-        /**< If pPlaneIn = pPlaneOut, the algorithm will be different */
-        if (p_buf_src == p_buf_dest) {
-            /**< Get Address of last pixel in the last row of the frame */
-            p_buf_dest +=
-                pPlaneOut[plane_number].u_stride*(pPlaneOut[plane_number].u_height-1) +
-                 pPlaneOut[plane_number].u_width - 1;
-
-            /**< We loop (height/2) times on the rows.
-             * In case u_height is odd, the row at the middle of the frame
-             * has to be processed as must be mirrored */
-            for (i = 0; i < ((pPlaneOut[plane_number].u_height)>>1); i++) {
-                for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
-                    temp_pix1= *p_buf_dest;
-                    *p_buf_dest--= *p_buf_src;
-                    *p_buf_src++ = temp_pix1;
-                }
-                /**< Go on next row in top of frame */
-                p_buf_src +=
-                    pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
-                /**< Go to the last pixel in previous row in bottom of frame*/
-                p_buf_dest -=
-                    pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
-            }
-
-            /**< Mirror middle row in case height is odd */
-            if ((pPlaneOut[plane_number].u_height%2)!= 0) {
-                p_buf_src =
-                    &(pPlaneOut[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]);
-                p_buf_src +=
-                    pPlaneOut[plane_number].u_stride*(pPlaneOut[plane_number].u_height>>1);
-                p_buf_dest =
-                    p_buf_src + pPlaneOut[plane_number].u_width;
-
-                /**< We loop u_width/2 times on this row.
-                 *  In case u_width is odd, the pixel at the middle of this row
-                 * remains unchanged */
-                for (j = 0; j < (pPlaneOut[plane_number].u_width>>1); j++) {
-                    temp_pix1= *p_buf_dest;
-                    *p_buf_dest--= *p_buf_src;
-                    *p_buf_src++ = temp_pix1;
-                }
-            }
-        } else {
-            /**< Get Address of last pixel in the last row of the output frame */
-            p_buf_dest +=
-                pPlaneOut[plane_number].u_stride*(pPlaneOut[plane_number].u_height-1) +
-                 pPlaneIn[plane_number].u_width - 1;
-
-            /**< Loop on rows */
-            for (i = 0; i < pPlaneOut[plane_number].u_height; i++) {
-                for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
-                    *p_buf_dest--= *p_buf_src++;
-                }
-
-                /**< Go on next row in top of input frame */
-                p_buf_src +=
-                    pPlaneIn[plane_number].u_stride - pPlaneOut[plane_number].u_width;
-                /**< Go to last pixel of previous row in bottom of input frame*/
-                p_buf_dest -=
-                    pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
-            }
-        }
-    }
-
-    return M4VIFI_OK;
-}
-
-M4OSA_ERR applyVideoRotation(M4OSA_Void* pBuffer, M4OSA_UInt32 width,
-                             M4OSA_UInt32 height, M4OSA_UInt32 rotation) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4VIFI_ImagePlane planeIn[3], planeOut[3];
-
-    if (pBuffer == M4OSA_NULL) {
-        ALOGE("applyVideoRotation: NULL input frame");
-        return M4ERR_PARAMETER;
-    }
-    M4OSA_UInt8* outPtr = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
-     (width*height*1.5), M4VS, (M4OSA_Char*)("rotation out ptr"));
-    if (outPtr == M4OSA_NULL) {
-        return M4ERR_ALLOC;
-    }
-
-    // In plane
-    prepareYUV420ImagePlane(planeIn, width,
-        height, (M4VIFI_UInt8 *)pBuffer, width, height);
-
-    // Out plane
-    if (rotation != 180) {
-        prepareYUV420ImagePlane(planeOut, height,
-            width, outPtr, height, width);
-    }
-
-    switch(rotation) {
-        case 90:
-            M4VIFI_Rotate90RightYUV420toYUV420(M4OSA_NULL, planeIn, planeOut);
-            memcpy(pBuffer, (void *)outPtr, (width*height*1.5));
-            break;
-
-        case 180:
-            // In plane rotation, so planeOut = planeIn
-            M4VIFI_Rotate180YUV420toYUV420(M4OSA_NULL, planeIn, planeIn);
-            break;
-
-        case 270:
-            M4VIFI_Rotate90LeftYUV420toYUV420(M4OSA_NULL, planeIn, planeOut);
-            memcpy(pBuffer, (void *)outPtr, (width*height*1.5));
-            break;
-
-        default:
-            ALOGE("invalid rotation param %d", (int)rotation);
-            err = M4ERR_PARAMETER;
-            break;
-    }
-
-    free((void *)outPtr);
-    return err;
-
-}
-
diff --git a/libvideoeditor/lvpp/VideoEditorTools.h b/libvideoeditor/lvpp/VideoEditorTools.h
deleted file mode 100755
index 9b464da..0000000
--- a/libvideoeditor/lvpp/VideoEditorTools.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_VE_TOOLS_H
-#define ANDROID_VE_TOOLS_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-#include "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include "M4VIFI_Clip.h"
-#include "M4VFL_transition.h"
-#include "M4VSS3GPP_API.h"
-#include "M4xVSS_API.h"
-#include "M4xVSS_Internal.h"
-#include "M4AIR_API.h"
-#include "PreviewRenderer.h"
-
-#define MEDIA_RENDERING_INVALID 255
-#define TRANSPARENT_COLOR 0x7E0
-#define LUM_FACTOR_MAX 10
-enum {
-    VIDEO_EFFECT_NONE               = 0,
-    VIDEO_EFFECT_BLACKANDWHITE      = 1,
-    VIDEO_EFFECT_PINK               = 2,
-    VIDEO_EFFECT_GREEN              = 4,
-    VIDEO_EFFECT_SEPIA              = 8,
-    VIDEO_EFFECT_NEGATIVE           = 16,
-    VIDEO_EFFECT_FRAMING            = 32,
-    VIDEO_EFFECT_FIFTIES            = 64,
-    VIDEO_EFFECT_COLOR_RGB16        = 128,
-    VIDEO_EFFECT_GRADIENT           = 256,
-    VIDEO_EFFECT_FADEFROMBLACK      = 512,
-    VIDEO_EFFECT_FADETOBLACK        = 2048,
-};
-
-typedef struct {
-    M4VIFI_UInt8 *vidBuffer;
-    M4OSA_UInt32 videoWidth;
-    M4OSA_UInt32 videoHeight;
-    M4OSA_UInt32 timeMs;
-    M4OSA_UInt32 timeOffset; //has the duration of clips played.
-                             //The flag shall be used for Framing.
-    M4VSS3GPP_EffectSettings* effectsSettings;
-    M4OSA_UInt32 numberEffects;
-    M4OSA_UInt32 outVideoWidth;
-    M4OSA_UInt32 outVideoHeight;
-    M4OSA_UInt32 currentVideoEffect;
-    M4OSA_Bool isFiftiesEffectStarted;
-    M4xVSS_MediaRendering renderingMode;
-    uint8_t *pOutBuffer;
-    size_t outBufferStride;
-    M4VIFI_UInt8*  overlayFrameRGBBuffer;
-    M4VIFI_UInt8*  overlayFrameYUVBuffer;
-} vePostProcessParams;
-
-M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut );
-M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut );
-
-M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext, M4VIFI_ImagePlane *PlaneIn,
-                                                    M4VIFI_ImagePlane *PlaneOut,M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind);
-
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn[3], M4VIFI_ImagePlane *PlaneOut, M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind );
-
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties( M4OSA_Void *pUserData, M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut, M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind );
-
-unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out, unsigned long lum_factor, void *user_data);
-
-M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx);
-M4VIFI_UInt8    M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-                                                      M4VIFI_ImagePlane *pPlaneOut);
-
-M4OSA_ERR M4xVSS_internalConvertRGB888toYUV(M4xVSS_FramingStruct* framingCtx);
-M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
-
-/*+ Handle the image files here */
-M4OSA_ERR LvGetImageThumbNail(const char *fileName, M4OSA_UInt32 height, M4OSA_UInt32 width, M4OSA_Void **pBuffer);
-/*- Handle the image files here */
-
-M4OSA_ERR applyRenderingMode(M4VIFI_ImagePlane* pPlaneIn, M4VIFI_ImagePlane* pPlaneOut, M4xVSS_MediaRendering mediaRendering);
-
-
-M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data, M4VIFI_ImagePlane PlaneIn[3], M4VIFI_ImagePlane *PlaneOut );
-M4VIFI_UInt8    M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-                                                                M4VIFI_ImagePlane *pPlaneIn,
-                                                                M4VIFI_ImagePlane *pPlaneOut);
-
-M4OSA_Void prepareYUV420ImagePlane(M4VIFI_ImagePlane *plane,
-    M4OSA_UInt32 width, M4OSA_UInt32 height, M4VIFI_UInt8 *buffer,
-    M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight);
-
-M4OSA_Void prepareYV12ImagePlane(M4VIFI_ImagePlane *plane,
-    M4OSA_UInt32 width, M4OSA_UInt32 height, M4OSA_UInt32 stride, M4VIFI_UInt8 *buffer);
-
-M4OSA_Void swapImagePlanes(
-    M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
-    M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2);
-
-M4OSA_Void computePercentageDone(
-     M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
-     M4OSA_UInt32 effectDuration, M4OSA_Double *percentageDone);
-
-M4OSA_Void computeProgressForVideoEffect(
-     M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
-     M4OSA_UInt32 effectDuration, M4VSS3GPP_ExternalProgress* extProgress);
-
-M4OSA_ERR prepareFramingStructure(
-    M4xVSS_FramingStruct* framingCtx,
-    M4VSS3GPP_EffectSettings* effectsSettings, M4OSA_UInt32 index,
-    M4VIFI_UInt8* overlayRGB, M4VIFI_UInt8* overlayYUV);
-
-M4OSA_ERR applyColorEffect(M4xVSS_VideoEffectType colorEffect,
-    M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
-    M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_UInt16 rgbColorData);
-
-M4OSA_ERR applyLumaEffect(M4VSS3GPP_VideoEffectType videoEffect,
-    M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
-    M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_Int32 lum_factor);
-
-M4OSA_ERR applyEffectsAndRenderingMode(vePostProcessParams *params,
-    M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight);
-
-android::status_t getVideoSizeByResolution(M4VIDEOEDITING_VideoFrameSize resolution,
-    uint32_t *pWidth, uint32_t *pHeight);
-
-M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void* pUserData,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void* pUserData,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void* pUserData,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-M4OSA_ERR applyVideoRotation(M4OSA_Void* pBuffer,
-    M4OSA_UInt32 width, M4OSA_UInt32 height, M4OSA_UInt32 rotation);
-#endif // ANDROID_VE_TOOLS_H
diff --git a/libvideoeditor/osal/Android.mk b/libvideoeditor/osal/Android.mk
deleted file mode 100755
index 5053e7d..0000000
--- a/libvideoeditor/osal/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/osal/inc/LVOSA_FileReader_optim.h b/libvideoeditor/osal/inc/LVOSA_FileReader_optim.h
deleted file mode 100755
index 237376d..0000000
--- a/libvideoeditor/osal/inc/LVOSA_FileReader_optim.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file         M4OSA_FileReader_optim.h
- * @brief        File reader for Symbian
- * @note         This file declares functions and types to read a file.
- ******************************************************************************
-*/
-
-
-
-#ifndef M4OSA_FILEREADER_OPTIM_H
-#define M4OSA_FILEREADER_OPTIM_H
-
-#define M4OSA_READER_OPTIM_USE_OSAL_IF
-
-/**/
-#ifndef M4OSA_READER_OPTIM_USE_OSAL_IF
-    typedef struct
-    {
-        M4OSA_Void*        (*pFctPtr_Open)( M4OSA_Void* fd,
-                                            M4OSA_UInt32 FileModeAccess,
-                                            M4OSA_UInt16* errno );
-        M4OSA_FilePosition (*pFctPtr_Read)( M4OSA_Void* fd,
-                                            M4OSA_UInt8* data,
-                                            M4OSA_FilePosition size,
-                                            M4OSA_UInt16* errno );
-        M4OSA_FilePosition (*pFctPtr_Seek)( M4OSA_Void* fd,
-                                            M4OSA_FilePosition pos,
-                                            M4OSA_FileSeekAccessMode mode,
-                                            M4OSA_UInt16* errno );
-        M4OSA_FilePosition (*pFctPtr_Tell)( M4OSA_Void* fd,
-                                            M4OSA_UInt16* errno );
-        M4OSA_Int32        (*pFctPtr_Close)( M4OSA_Void* fd,
-                                             M4OSA_UInt16* errno );
-        M4OSA_Void         (*pFctPtr_AccessType)( M4OSA_UInt32 FileModeAccess_In,
-                                                  M4OSA_Void* FileModeAccess_Out );
-
-    } M4OSA_FileSystem_FctPtr;
-#endif
-/**/
-
-
-/* Reader API : bufferized functions */
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_ERR M4OSA_fileReadOpen_optim( M4OSA_Context* context,
-                                        M4OSA_Void* fileDescriptor,
-                                        M4OSA_UInt32 fileModeAccess);
-#else
-    M4OSA_ERR M4OSA_fileReadOpen_optim( M4OSA_Context* context,
-                                        M4OSA_Void* fileDescriptor,
-                                        M4OSA_UInt32 fileModeAccess,
-                                        M4OSA_FileSystem_FctPtr *FS);
-#endif
-
-M4OSA_ERR M4OSA_fileReadData_optim( M4OSA_Context context,
-                                    M4OSA_MemAddr8 buffer,
-                                    M4OSA_UInt32* size );
-M4OSA_ERR M4OSA_fileReadSeek_optim( M4OSA_Context context,
-                                    M4OSA_FileSeekAccessMode seekMode,
-                                    M4OSA_FilePosition* position );
-M4OSA_ERR M4OSA_fileReadClose_optim( M4OSA_Context context );
-M4OSA_ERR M4OSA_fileReadGetOption_optim( M4OSA_Context context,
-                                         M4OSA_FileReadOptionID optionID,
-                                         M4OSA_DataOption *optionValue );
-M4OSA_ERR M4OSA_fileReadSetOption_optim( M4OSA_Context context,
-                                         M4OSA_FileReadOptionID optionID,
-                                         M4OSA_DataOption optionValue );
-
-#endif /* M4OSA_FILEREADER_OPTIM_H */
diff --git a/libvideoeditor/osal/inc/LV_Macros.h b/libvideoeditor/osal/inc/LV_Macros.h
deleted file mode 100755
index b8d7e85..0000000
--- a/libvideoeditor/osal/inc/LV_Macros.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*******************************************************************************
-* @file        LV_Macros.h
-* @par        NXP Software
-* @brief    Macros definition for Smartphone team
-*******************************************************************************/
-
-#ifndef LV_MACROS_H
-#define LV_MACROS_H
-
-/*------------*/
-/*    INCLUDES  */
-/*------------*/
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-
-/******************************************************************************
-*
-* CHECK_PTR(fct, p, err, errValue)
-* @note    This macro checks the value p. If it is NULL, it sets the variable err
-*           to errValue and jumps to the label <fct>_cleanUp. A trace is displayed
-*           signalling the error, the function name and the line number.
-*
-******************************************************************************/
-#define CHECK_PTR(fct, p, err, errValue) \
-{ \
-    if(M4OSA_NULL == (p)) \
-    { \
-        (err) = (errValue) ; \
-        M4OSA_TRACE1_1((M4OSA_Char*)"" #fct "(L%d): " #p " is NULL, returning " #errValue "",__LINE__) ; \
-        goto fct##_cleanUp; \
-    } \
-}
-
-/******************************************************************************
-*
-* CHECK_ERR(fct, err)
-* @note    This macro checks the value err. If it is not NULL, a trace is displayed
-*           signalling the error, the function name and the line number. The macro
-*           jumps to the label <fct>_cleanUp.
-*
-******************************************************************************/
-#define CHECK_ERR(fct, err) \
-{ \
-    if(M4NO_ERROR != (err)) \
-    { \
-        M4OSA_TRACE1_2((M4OSA_Char*)"!!! " #fct "(L%d): ERROR 0x%.8x returned",\
-                                                               __LINE__,err) ; \
-        goto fct##_cleanUp; \
-    } \
-}
-
-
-/******************************************************************************
-*
-* CHECK_ERR(fct, err)
-* @note    This macro compares a current state with a state value. If they are different,
-*           err is set to M4ERR_STATE.
-*           A trace is displayed signalling the error, the function name and the line number.
-*           The macro jumps to the label <fct>_cleanUp.
-*
-******************************************************************************/
-#define    CHECK_STATE(fct, stateValue, state) \
-{ \
-    if((stateValue) != (state)) \
-    { \
-        M4OSA_TRACE1_1("" #fct " called in bad state %d", state) ; \
-        (err) = M4ERR_STATE ; \
-        goto fct##_cleanUp; \
-    } \
-}
-
-/******************************************************************************
-*
-* SAFE_FREE(p)
-* @note    This macro checks the value of p is not NULL. If it is NULL, it does
-*           nothing. Else, p is de allocated and set to NULL.
-*
-******************************************************************************/
-#define SAFE_FREE(p) \
-{ \
-    if(M4OSA_NULL != (p)) \
-    { \
-        free((p)) ; \
-        (p) = M4OSA_NULL ; \
-    } \
-}
-
-
-
-#endif /*---  LV_MACROS_H ---*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_CharStar.h b/libvideoeditor/osal/inc/M4OSA_CharStar.h
deleted file mode 100755
index 06316f0..0000000
--- a/libvideoeditor/osal/inc/M4OSA_CharStar.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_CharStar.h
- * @ingroup
- * @brief        external API of the Char Star set of functions.
- ************************************************************************
-*/
-
-#ifndef M4OSA_CHARSTAR_H
-#define M4OSA_CHARSTAR_H
-
-/* general OSAL types and prototypes inclusion                      */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Time.h"
-#include "M4OSA_FileCommon.h"
-
-/* types definition                                                          */
-typedef enum
-{
-   M4OSA_kchrDec  = 0x01,
-   M4OSA_kchrHexa = 0x02,
-   M4OSA_kchrOct  = 0x03
-} M4OSA_chrNumBase;
-
-/* error and warning codes                                                   */
-#define M4ERR_CHR_STR_OVERFLOW M4OSA_ERR_CREATE(M4_ERR,M4OSA_CHARSTAR,0x000001)
-#define M4ERR_CHR_CONV_FAILED  M4OSA_ERR_CREATE(M4_ERR,M4OSA_CHARSTAR,0x000002)
-#define M4WAR_CHR_NOT_FOUND    M4OSA_ERR_CREATE(M4_WAR,M4OSA_CHARSTAR,0x000001)
-#define M4WAR_CHR_NUM_RANGE    M4OSA_ERR_CREATE(M4_WAR,M4OSA_CHARSTAR,0x000002)
-#define M4WAR_CHR_NEGATIVE     M4OSA_ERR_CREATE(M4_WAR,M4OSA_CHARSTAR,0x000003)
-
-/* prototypes of the Char Star functions                                     */
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrNCopy          (M4OSA_Char   *strOut,
-                                   M4OSA_Char   *strIn,
-                                   M4OSA_UInt32 len2Copy);
-M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrAreIdentical   (M4OSA_Char   *strIn1,
-                                   M4OSA_Char   *strIn2,
-                                   M4OSA_Bool  *result);
-M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrGetUInt32      (M4OSA_Char   *strIn,
-                                   M4OSA_UInt32 *val,
-                                   M4OSA_Char   **strOut,
-                                   M4OSA_chrNumBase base);
-M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrGetUInt16      (M4OSA_Char   *strIn,
-                                   M4OSA_UInt16 *val,
-                                   M4OSA_Char   **strOut,
-                                   M4OSA_chrNumBase base);
-M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrSPrintf         (M4OSA_Char  *strOut,
-                                   M4OSA_UInt32 strOutMaxLen,
-                                   M4OSA_Char   *format,
-                                   ...);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Clock.h b/libvideoeditor/osal/inc/M4OSA_Clock.h
deleted file mode 100755
index 52ea696..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Clock.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Clock.h
- * @ingroup      OSAL
- * @brief        clock API
- ************************************************************************
-*/
-
-#ifndef M4OSA_CLOCK_H
-#define M4OSA_CLOCK_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Time.h"
-
-
-#define M4WAR_TIMESCALE_TOO_BIG    M4OSA_ERR_CREATE(M4_WAR,M4OSA_CLOCK,0x000001) /**< Time precision too high for the system*/
-#define M4ERR_CLOCK_BAD_REF_YEAR   M4OSA_ERR_CREATE(M4_ERR,M4OSA_CLOCK,0x000001) /**< Input year of reference is neither 1900, nor 1970 nor 2000*/
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_CLOCK_EXPORT_TYPE M4OSA_ERR M4OSA_clockGetTime(M4OSA_Time* pTime,
-                                                M4OSA_UInt32 timescale);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*M4OSA_CLOCK_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_CoreID.h b/libvideoeditor/osal/inc/M4OSA_CoreID.h
deleted file mode 100755
index 9172800..0000000
--- a/libvideoeditor/osal/inc/M4OSA_CoreID.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4OSA_CoreID.h
- * @brief  defines the uniques component identifiers used for memory management
- *         and optionID mechanism
- * @note
- ************************************************************************
-*/
-#ifndef __M4OSA_COREID_H__
-#define __M4OSA_COREID_H__
-
-/* CoreId are defined on 14 bits */
-/* we start from 0x0100, lower values are reserved for osal core components */
-
-/* reader shells*/
-#define M4READER_COMMON     0x0100
-#define M4READER_AVI        0x0101
-#define M4READER_AMR        0x0102
-#define M4READER_3GP        0x0103
-#define M4READER_NET        0x0104
-#define M4READER_3GP_HTTP   0x0105
-#define M4READER_MP3        0x0106
-#define M4READER_WAV        0x0107
-#define M4READER_MIDI       0x0108
-#define M4READER_ASF        0x0109
-#define M4READER_REAL        0x010A
-#define M4READER_AAC        0x010B
-#define M4READER_FLEX        0x010C
-#define M4READER_BBA        0x010D
-#define M4READER_SYNTHESIS_AUDIO    0x010E
-#define M4READER_JPEG        0x010F
-
-
-/* writer shells*/
-#define M4WRITER_COMMON     0x0110
-#define M4WRITER_AVI        0x0111
-#define M4WRITER_AMR        0x0112
-#define M4WRITER_3GP        0x0113
-#define M4WRITER_JPEG        0x0116
-#define M4WRITER_MP3        0x0117
-
-/* decoder shells */
-#define M4DECODER_COMMON    0x0120
-#define M4DECODER_JPEG      0x0121
-#define M4DECODER_MPEG4     0x0122
-#define M4DECODER_AUDIO     0x0123
-#define M4DECODER_AVC       0x0124
-#define M4DECODER_MIDI      0x0125
-#define M4DECODER_WMA        0x0126
-#define M4DECODER_WMV        0x0127
-#define M4DECODER_RMV        0x0128
-#define M4DECODER_RMA        0x0129
-#define M4DECODER_AAC       0x012A
-#define M4DECODER_BEATBREW  0x012B
-#define M4DECODER_EXTERNAL  0x012C
-
-/* encoder shells */
-#define M4ENCODER_COMMON    0x0130
-#define M4ENCODER_JPEG      0x0131
-#define M4ENCODER_MPEG4     0x0132
-#define M4ENCODER_AUDIO     0x0133
-#define M4ENCODER_VID_NULL  0x0134
-#define M4ENCODER_MJPEG        0x0135
-#define M4ENCODER_MP3        0x0136
-#define M4ENCODER_H264        0x0137
-#define M4ENCODER_AAC        0x0138
-#define M4ENCODER_AMRNB        0x0139
-#define M4ENCODER_AUD_NULL  0x013A
-#define M4ENCODER_EXTERNAL  0x013B
-
-/* cores */
-#define M4JPG_DECODER       0x0140
-#define M4JPG_ENCODER       0x0141
-
-#define M4MP4_DECODER       0x0142
-#define M4MP4_ENCODER       0x0143
-
-#define M4AVI_COMMON        0x0144
-#define M4AVI_READER        0x0145
-#define M4AVI_WRITER        0x0146
-
-#define M4HTTP_ENGINE       0x0147
-
-#define M4OSA_TMPFILE       0x0148
-#define M4TOOL_TIMER        0x0149
-
-#define M4AMR_READER        0x014A
-
-#define M4MP3_READER        0x014B
-
-#define M4WAV_READER        0x014C
-#define M4WAV_WRITER        0x014D
-#define M4WAV_COMMON        0x014E
-
-#define M4ADTS_READER        0x014F
-#define M4ADIF_READER        0x016A
-
-#define M4SPS               0x0150
-#define M4EXIF_DECODER      0x0151
-#define M4EXIF_ENCODER      0x0152
-#define M4GIF_DECODER       0x0153
-#define M4GIF_ENCODER       0x0154
-#define M4PNG_DECODER       0x0155
-#define M4PNG_ENCODER       0x0156
-#define M4WBMP_DECODER      0x0157
-#define M4WBMP_ENCODER      0x0158
-
-#define M4AMR_WRITER        0x0159    /**< no room to put it along M4AMR_READER */
-
-
-#define M4AVC_DECODER       0x015A
-#define M4AVC_ENCODER       0x015B
-
-#define M4ASF_READER        0x015C
-#define M4WMDRM_AGENT        0x015D
-#define M4MIDI_READER        0x0162    /**< no room before the presenters */
-#define M4RM_READER         0x163
-#define M4RMV_DECODER        0x164
-#define M4RMA_DECODER        0x165
-
-#define M4TOOL_XML            0x0166
-#define M4TOOL_EFR            0x0167    /**< Decryption module for Video Artist */
-#define M4IAL_FTN            0x0168    /* FTN implementation of the IAL */
-#define M4FTN                0x0169    /* FTN library */
-
-/* presenter */
-#define M4PRESENTER_AUDIO   0x0160
-#define M4PRESENTER_VIDEO   0x0161
-
-/* high level interfaces (vps, etc..)*/
-#define M4VPS               0x0170
-#define M4VTS               0x0171
-#define M4VXS               0x0172
-#define M4CALLBACK          0x0173
-#define M4VES               0x0174
-#define M4PREPROCESS_VIDEO  0x0175
-#define M4GRAB_AUDIO        0x0176
-#define M4GRAB_VIDEO        0x0177
-#define M4VSSAVI            0x0178
-#define M4VSS3GPP           0x0179
-#define M4PTO3GPP           0x017A
-#define M4PVX_PARSER        0x017B
-#define M4VCS                0x017C
-#define M4MCS                0x017D
-#define M4MNMC                0x0180    /**< mnm controller */
-#define M4TTEXT_PARSER      0x0181    /**< timed text */
-#define M4MM                0x0182    /**< Music manager */
-#define M4MDP                0x0183    /**< Metadata parser */
-#define M4MMSQLCORE            0x0184
-#define M4VPSIL                0x0185
-#define M4FILEIL            0x0186 /* IL file Interface */
-#define M4MU                0x0187
-#define M4VEE                0x0188  /**< Video effect engine */
-#define M4VA                0x0189 /* VideoArtist */
-#define M4JTS                0x018A
-#define M4JTSIL                0x018B
-#define M4AIR                0x018C  /**< AIR */
-#define M4SPE                0x018D  /**< Still picture editor */
-#define M4VS                0x018E    /**< Video Studio (xVSS) */
-#define M4VESIL                0x018F    /**< VES il */
-#define M4ID3                0x0190    /**< ID3 Tag Module */
-#define M4SC                0x0191    /**< Media Scanner */
-#define M4TG                0x0192  /**< Thumbnail Generator*/
-#define M4TS                0x0193    /**< Thumbnail storage */
-#define M4MB                0x0194    /**< Media browser */
-
-/* high level application (test or client app) */
-#define M4APPLI             0x0200
-#define M4VA_APPLI            0x0201    /**< Video Artist test application */
-
-/* external components (HW video codecs, etc.) */
-#define M4VD_EXTERNAL        0x0300
-#define M4VE_EXTERNAL        0x0301
-
-
-/* priority to combine with module ids */
-#define M4HIGH_PRIORITY     0xC000
-#define M4MEDIUM_PRIORITY   0x8000
-#define M4LOW_PRIORITY      0x4000
-#define M4DEFAULT_PRIORITY  0x0000
-
-
-#endif /*__M4OSA_COREID_H__*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Debug.h b/libvideoeditor/osal/inc/M4OSA_Debug.h
deleted file mode 100755
index 826ab51..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Debug.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Debug.h
- * @brief        Debug and Trace Macro
- ************************************************************************
-*/
-
-
-#ifndef _M4OSA_DEBUG_H_
-#define _M4OSA_DEBUG_H_
-
-#include "M4OSA_Error.h"
-#include "M4OSA_Types.h"
-
-
-/* defaut value, defined only if not defined already. */
-#ifndef M4TRACE_ID
-#define M4TRACE_ID M4UNKNOWN_COREID
-#endif /* M4TRACE_ID undefined */
-
-
-#define M4OSA_SUPER_DEBUG_LEVEL 0
-
-#ifndef M4OSA_DEBUG_LEVEL
-#define M4OSA_DEBUG_LEVEL       0
-#endif
-
-
-#define M4OSA_SUPER_TRACE_LEVEL 0
-
-#ifndef M4OSA_TRACE_LEVEL
-#define M4OSA_TRACE_LEVEL       0
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
-#if (M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)
-
-/* Debug macros */
-extern M4OSA_Void M4OSA_DEBUG_traceFunction(M4OSA_UInt32 line,
-                                            M4OSA_Char* fileName,
-                                            M4OSA_UInt32 level,
-                                            M4OSA_Char* stringCondition,
-                                            M4OSA_Char* message,
-                                            M4OSA_ERR returnedError);
-
-
-#define M4OSA_DEBUG_IFx(cond, errorCode, msg, level)\
-      if(cond)\
-      {\
-         M4OSA_DEBUG_traceFunction(__LINE__, (M4OSA_Char*)__FILE__, level,\
-                                   (M4OSA_Char*)#cond, (M4OSA_Char*)msg,\
-                                   (errorCode));\
-         return(errorCode);\
-      }
-
-#define M4OSA_DEBUG(errorCode, msg)\
-         M4OSA_DEBUG_traceFunction(__LINE__, (M4OSA_Char*)__FILE__, 1,\
-                                   (M4OSA_Char*)#errorCode, (M4OSA_Char*)msg,\
-                                   (errorCode));
-
-#else /*(M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)*/
-
-
-#define M4OSA_DEBUG(errorCode, msg)
-
-#endif /*(M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)*/
-
-
-
-#if (M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)
- #define M4OSA_DEBUG_IF1(cond, errorCode, msg)\
-         M4OSA_DEBUG_IFx(cond, errorCode, msg, 1)
-#else
- #define M4OSA_DEBUG_IF1(cond, errorCode, msg)
-#endif /*(M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)*/
-
-
-#if (M4OSA_DEBUG_LEVEL >= 2) || (M4OSA_SUPER_DEBUG_LEVEL >= 2)
- #define M4OSA_DEBUG_IF2(cond, errorCode, msg)\
-         M4OSA_DEBUG_IFx(cond, errorCode, msg, 2)
-#else
- #define M4OSA_DEBUG_IF2(cond, errorCode, msg)
-#endif /*(M4OSA_DEBUG_LEVEL >= 2) || (M4OSA_SUPER_DEBUG_LEVEL >= 2)*/
-
-
-#if (M4OSA_DEBUG_LEVEL >= 3) || (M4OSA_SUPER_DEBUG_LEVEL >= 3)
- #define M4OSA_DEBUG_IF3(cond, errorCode, msg)\
-         M4OSA_DEBUG_IFx(cond, errorCode, msg, 3)
-#else
- #define M4OSA_DEBUG_IF3(cond, errorCode, msg)
-#endif /*(M4OSA_DEBUG_LEVEL >= 3) || (M4OSA_SUPER_DEBUG_LEVEL >= 3)*/
-
-
-
-/* Trace macros */
-
-#if (M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)
-
-extern M4OSA_Void M4OSA_TRACE_traceFunction(M4OSA_UInt32 line,
-                                            M4OSA_Char* fileName,
-                                            M4OSA_CoreID coreID,
-                                            M4OSA_UInt32 level,
-                                            M4OSA_Char* stringMsg,
-                                            ... );
-
-
-
-#define M4OSA_TRACEx_0(msg, level)\
-      M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
-                             (M4OSA_CoreID)M4TRACE_ID, level, (M4OSA_Char*)msg);
-
-
-#define M4OSA_TRACEx_1(msg, param1, level)\
-      M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
-                     (M4OSA_CoreID)M4TRACE_ID, level, (M4OSA_Char*)msg, param1);
-
-
-#define M4OSA_TRACEx_2(msg, param1, param2, level)\
-      M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
-                                (M4OSA_CoreID)M4TRACE_ID, level,\
-                                (M4OSA_Char*)msg, param1,\
-                                param2);
-
-
-#define M4OSA_TRACEx_3(msg, param1, param2, param3, level)\
-      M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
-                                (M4OSA_CoreID)M4TRACE_ID, level, (M4OSA_Char*)msg,\
-                                param1,param2, param3);
-
-
-#define M4OSA_TRACEx_4(msg, param1, param2, param3, param4, level)\
-      M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
-                                (M4OSA_CoreID)M4TRACE_ID, level,\
-                                (M4OSA_Char*)msg, param1,\
-                                param2, param3, param4);
-
-
-#define M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, level)\
-      M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
-                                (M4OSA_CoreID)M4TRACE_ID, level,\
-                                (M4OSA_Char*)msg, param1,\
-                                param2, param3, param4, param5);
-
-#endif /*(M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)*/
-
-
-
-#if (M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)
-#define M4OSA_TRACE1_0(msg)\
-        M4OSA_TRACEx_0(msg, 1)
-
-#define M4OSA_TRACE1_1(msg, param1)\
-        M4OSA_TRACEx_1(msg, param1, 1)
-
-#define M4OSA_TRACE1_2(msg, param1, param2)\
-        M4OSA_TRACEx_2(msg, param1, param2, 1)
-
-#define M4OSA_TRACE1_3(msg, param1, param2, param3)\
-        M4OSA_TRACEx_3(msg, param1, param2, param3, 1)
-
-#define M4OSA_TRACE1_4(msg, param1, param2, param3, param4)\
-        M4OSA_TRACEx_4(msg, param1, param2, param3, param4, 1)
-
-#define M4OSA_TRACE1_5(msg, param1, param2, param3, param4, param5)\
-        M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, 1)
-
-#else /*(M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)*/
-
-#define M4OSA_TRACE1_0(msg)
-#define M4OSA_TRACE1_1(msg, param1)
-#define M4OSA_TRACE1_2(msg, param1, param2)
-#define M4OSA_TRACE1_3(msg, param1, param2, param3)
-#define M4OSA_TRACE1_4(msg, param1, param2, param3, param4)
-#define M4OSA_TRACE1_5(msg, param1, param2, param3, param4, param5)
-
-#endif /*(M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)*/
-
-
-#if (M4OSA_TRACE_LEVEL >= 2) || (M4OSA_SUPER_TRACE_LEVEL >= 2)
-#define M4OSA_TRACE2_0(msg)\
-        M4OSA_TRACEx_0(msg, 2)
-
-#define M4OSA_TRACE2_1(msg, param1)\
-        M4OSA_TRACEx_1(msg, param1, 2)
-
-#define M4OSA_TRACE2_2(msg, param1, param2)\
-        M4OSA_TRACEx_2(msg, param1, param2, 2)
-
-#define M4OSA_TRACE2_3(msg, param1, param2, param3)\
-        M4OSA_TRACEx_3(msg, param1, param2, param3, 2)
-
-#define M4OSA_TRACE2_4(msg, param1, param2, param3, param4)\
-        M4OSA_TRACEx_4(msg, param1, param2, param3, param4, 2)
-
-#define M4OSA_TRACE2_5(msg, param1, param2, param3, param4, param5)\
-        M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, 2)
-
-#else /*(M4OSA_TRACE_LEVEL >= 2) || (M4OSA_SUPER_TRACE_LEVEL >= 2)*/
-
-#define M4OSA_TRACE2_0(msg)
-#define M4OSA_TRACE2_1(msg, param1)
-#define M4OSA_TRACE2_2(msg, param1, param2)
-#define M4OSA_TRACE2_3(msg, param1, param2, param3)
-#define M4OSA_TRACE2_4(msg, param1, param2, param3, param4)
-#define M4OSA_TRACE2_5(msg, param1, param2, param3, param4, param5)
-#endif /*(M4OSA_TRACE_LEVEL >= 2) || (M4OSA_SUPER_TRACE_LEVEL >= 2)*/
-
-
-#if (M4OSA_TRACE_LEVEL >= 3) || (M4OSA_SUPER_TRACE_LEVEL >= 3)
-#define M4OSA_TRACE3_0(msg)\
-        M4OSA_TRACEx_0(msg, 3)
-
-#define M4OSA_TRACE3_1(msg, param1)\
-        M4OSA_TRACEx_1(msg, param1, 3)
-
-#define M4OSA_TRACE3_2(msg, param1, param2)\
-        M4OSA_TRACEx_2(msg, param1, param2, 3)
-
-#define M4OSA_TRACE3_3(msg, param1, param2, param3)\
-        M4OSA_TRACEx_3(msg, param1, param2, param3, 3)
-
-#define M4OSA_TRACE3_4(msg, param1, param2, param3, param4)\
-        M4OSA_TRACEx_4(msg, param1, param2, param3, param4, 3)
-
-#define M4OSA_TRACE3_5(msg, param1, param2, param3, param4, param5)\
-        M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, 3)
-
-#else /*(M4OSA_TRACE_LEVEL >= 3) || (M4OSA_SUPER_TRACE_LEVEL >= 3)*/
-
-#define M4OSA_TRACE3_0(msg)
-#define M4OSA_TRACE3_1(msg, param1)
-#define M4OSA_TRACE3_2(msg, param1, param2)
-#define M4OSA_TRACE3_3(msg, param1, param2, param3)
-#define M4OSA_TRACE3_4(msg, param1, param2, param3, param4)
-#define M4OSA_TRACE3_5(msg, param1, param2, param3, param4, param5)
-
-#endif /*(M4OSA_TRACE_LEVEL >= 3) || (M4OSA_SUPER_TRACE_LEVEL >= 3)*/
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _M4OSA_DEBUG_H_ */
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Error.h b/libvideoeditor/osal/inc/M4OSA_Error.h
deleted file mode 100755
index 75c3177..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Error.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Error.h
- * @ingroup      OSAL
- * @brief        Definition of common error types
- * @note         This file contains macros to generate and analyze error codes.
- ************************************************************************
-*/
-
-
-#ifndef M4OSA_ERROR_H
-#define M4OSA_ERROR_H
-
-#include "M4OSA_Types.h"
-
-/** M4OSA_ERR is a 32 bits unsigned integer.
- * To sort returned code, a specific naming convention must be followed:
- * - Severity (2 bits): It may br either 0b00 (no error), 0b01 (warning) or
- *                      0b01 (fatal error)
- * - Core ID (14 bits): It is a unique ID for each core component
- * - ErrorID (16 bits): It is the specific error code
-
- * EACH CORE COMPONENT FUNCTION SHOULD RETURN AN M4OSA_ERR
-*/
-typedef M4OSA_UInt32   M4OSA_ERR;
-
-#define M4_OK     0
-#define M4_WAR    1
-#define M4_ERR    2
-
-
-/* Macro to process M4OSA_ERR */
-
-/** This macro tests if the provided M4OSA_ERR is a warning or not*/
-#define M4OSA_ERR_IS_WARNING(error)   ((((error)>>30) == M4_WAR) ? 1:0)
-
-/** This macro tests if the provided M4OSA_ERR is a fatal error or not*/
-#define M4OSA_ERR_IS_ERROR(error)   ((((error)>>30) == M4_ERR) ? 1:0)
-
-/** This macro returns an error code accroding to the 3 provided fields:
-  * @arg severity: (IN) [M4OSA_UInt32] Severity to put in the error code
-  * @arg coreID: (IN) [M4OSA_UInt32] CoreID to put in the error code
-  * @arg errorID: (IN) [M4OSA_UInt32] ErrorID to put in the error code*/
-#define M4OSA_ERR_CREATE(severity, coreID, errorID)\
-   (M4OSA_UInt32)((((M4OSA_UInt32)severity)<<30)+((((M4OSA_UInt32)coreID)&0x003FFF)<<16)+(((M4OSA_UInt32)errorID)&0x00FFFF))
-
-/** This macro extracts the 3 fields from the error:
-  * @arg error: (IN) [M4OSA_ERR] Error code
-  * @arg severity: (OUT) [M4OSA_UInt32] Severity to put in the error code
-  * @arg coreID: (OUT) [M4OSA_UInt32] CoreID to put in the error code
-  * @arg errorID: (OUT) [M4OSA_UInt32] ErrorID to put in the error code*/
-#define M4OSA_ERR_SPLIT(error, severity, coreID, errorID)\
-   { severity=(M4OSA_UInt32)((error)>>30);\
-     coreID=(M4OSA_UInt32)(((error)>>16)&0x003FFF);\
-     (M4OSA_UInt32)(errorID=(error)&0x00FFFF); }
-
-
-/* "fake" CoreID, is used to report an unknown CoreID. Used by the trace system
-when the core ID macro isn't defined. Defined here instead of CoreID.h to avoid
-introducing dependencies to common/inc. */
-
-#define M4UNKNOWN_COREID    0x3FFF /* max possible CoreID */
-
-#define M4_COMMON           0x00  /**<Common*/
-#define M4MP4_COMMON        0x01  /**<Core MP4 (common)*/
-#define M4MP4_WRITER        0x02  /**<Core MP4 writer*/
-#define M4MP4_READER        0x03  /**<Core MP4 reader*/
-#define M4RTSP_COMMON       0x11  /**<Core RTSP common*/
-#define M4RTSP_WRITER       0x12  /**<Core RTSP transmitter*/
-#define M4RTSP_READER       0x13  /**<Core RTSP receiver*/
-#define M4RTP_WRITER        0x14  /**<Core RTP/RTCP receiver*/
-#define M4RTP_READER        0x15  /**<Core RTP/RTCP transmitter*/
-#define M4SAP_WRITER        0x16  /**<Core SAP transmitter*/
-#define M4SAP_READER        0x17  /**<Core SAP receiver*/
-#define M4DVBH_READER        0x18  /**<Core DVBH receiver*/
-#define M4SDP_WRITER        0x22  /**<Core SDP writer*/
-#define M4SDP_READER        0x31  /**<Core SDP reader*/
-#define M4PAK_AMR           0x32  /**<Core packetizer AMR (RFC3267)*/
-#define M4DEPAK_AMR         0x33  /**<Core de-packetizer AMR (RFC3267)*/
-#define M4PAK_H263          0x34  /**<Core packetizer H263 (RFC2429)*/
-#define M4DEPAK_H263        0x35  /**<Core de-packetizer H263(RFC2429)*/
-#define M4PAK_SIMPLE        0x36  /**<Core packetizer SimpleDraft (RFC xxxx)*/
-#define M4DEPAK_SIMPLE      0x37  /**<Core de-packetizer SimpleDraft (RFC xxxx)*/
-#define M4PAK_3016_VIDEO    0x38  /**<Core packetizer RFC3016 video*/
-#define M4DEPAK_3016_VIDEO  0x39  /**<Core de-packetizer RFC3016 video*/
-#define M4PAK_3016_AUDIO    0x3A  /**<Core packetizer RFC3016 audio (LATM)*/
-#define M4DEPAK_3016_AUDIO  0x3B  /**<Core de-packetizer RFC3016 audio (LATM)*/
-#define M4DEPAK_H264        0x3C  /**<Core de-packetizer H264*/
-#define M4DEPAK_REALV        0x3D  /**<Core de-packetizer Real Video */
-#define M4DEPAK_REALA        0x3E  /**<Core de-packetizer Real Audio */
-#define M4RDT_READER        0x3F  /**<Core RDT receiver*/
-#define M4TCP_DMUX          0x50  /**<Core TCP demux*/
-#define M4IOD_PARSER        0x51  /**<Core IOD parser*/
-#define M4OSA_FILE_COMMON   0x61  /**<OSAL file common*/
-#define M4OSA_FILE_WRITER   0x62  /**<OSAL file writer*/
-#define M4OSA_FILE_READER   0x63  /**<OSAL file reader*/
-#define M4OSA_FILE_EXTRA    0x64  /**<OSAL file extra*/
-#define M4OSA_DIRECTORY     0x65  /**<OSAL directory*/
-#define M4OSA_SOCKET        0x71  /**<OSAL socket (both reader and writer)*/
-#define M4OSA_THREAD        0x81  /**<OSAL thread*/
-#define M4OSA_MUTEX         0x82  /**<OSAL mutex*/
-#define M4OSA_SEMAPHORE     0x83  /**<OSAL semaphore*/
-#define M4OSA_CLOCK         0x84  /**<OSAL clock*/
-#define M4OSA_MEMORY        0x91  /**<OSAL memory*/
-#define M4CALL_BACK         0xA1  /**<Call Back error*/
-#define M4OSA_URI           0xB1  /**<OSAL URI handler*/
-#define M4OSA_STRING        0xB2  /**<OSAL string*/
-#define M4SYS_CMAPI         0xB3  /**<SYSTEM Common Medi API*/
-#define M4OSA_CHARSTAR      0xB4  /**<OSAL CharStar*/
-#define M4REACTOR           0xC1  /**<Core reactor*/
-#define M4TEST              0xD1  /**<Test component*/
-#define M4STACK                0xE1  /**< Core ID of the integrated stack*/
-#define M4STACK_REAL        0xE2  /**<Core ID of the Real integrated stack */
-#define M4TOOL_LBVT_PARAM   0xF1  /**<LB_VT config file manager*/
-#define M4TOOL_LINK_LIST    0xF2  /**<Tool linked list*/
-#define M4TOOL_BASE64       0xF3  /**<Core base64 encoder/decoder*/
-
-
-
-/* Definition of common error codes */
-/** there is no error*/
-#define M4NO_ERROR            0x00000000
-
-/** At least one parameter is NULL*/
-#define M4ERR_PARAMETER            M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000001)
-/** This function cannot be called now*/
-#define M4ERR_STATE                M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000002)
-/** There is no more memory available*/
-#define M4ERR_ALLOC                M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000003)
-/** Provided context is not a valid one*/
-#define M4ERR_BAD_CONTEXT          M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000004)
-#define M4ERR_CONTEXT_FAILED       M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000005)
-#define M4ERR_BAD_STREAM_ID        M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000006)
-/** The optionID is not a valid one*/
-#define M4ERR_BAD_OPTION_ID        M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000007)
-/** This option is a write only one*/
-#define M4ERR_WRITE_ONLY           M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000008)
-/** This option is a read only one*/
-#define M4ERR_READ_ONLY            M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000009)
-/** This function is not supported yet*/
-#define M4ERR_NOT_IMPLEMENTED      M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x00000A)
-
-#define    M4ERR_UNSUPPORTED_MEDIA_TYPE  M4OSA_ERR_CREATE(M4_ERR, M4_COMMON, 0x00000B)
-
-#define M4WAR_NO_DATA_YET          M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000001)
-#define M4WAR_NO_MORE_STREAM       M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000002)
-#define M4WAR_INVALID_TIME         M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000003)
-#define M4WAR_NO_MORE_AU           M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000004)
-#define M4WAR_TIME_OUT             M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000005)
-/** The buffer is full*/
-#define M4WAR_BUFFER_FULL          M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000006)
-/* The server asks for a redirection */
-#define M4WAR_REDIRECT               M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000007)
-#define M4WAR_TOO_MUCH_STREAMS     M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000008)
-/* SF Codec detected INFO_FORMAT_CHANGE during decode */
-#define M4WAR_INFO_FORMAT_CHANGE M4OSA_ERR_CREATE(M4_WAR, M4_COMMON, 0x000009)
-
-#endif /*M4OSA_ERROR_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Export.h b/libvideoeditor/osal/inc/M4OSA_Export.h
deleted file mode 100755
index b7a6e81..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Export.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file    M4OSA_Export.h
- * @brief    Data access export types for Android
- * @note    This file defines types which must be
- *          used to import or export any function.
- ************************************************************************
-*/
-
-#ifndef M4OSA_EXPORT_H
-#define M4OSA_EXPORT_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif /*__cplusplus*/
-
-/************************************/
-/*  OSAL EXPORTS                    */
-/************************************/
-
-#define M4OSAL_CHARSTAR_EXPORT_TYPE            /**< OSAL CHAR_STAR        */
-#define M4OSAL_CLOCK_EXPORT_TYPE            /**< OSAL CLOCK            */
-#define M4OSAL_DATE_EXPORT_TYPE                /**< OSAL DATE            */
-#define M4OSAL_FILE_EXPORT_TYPE                /**< OSAL FILE            */
-#define M4OSAL_REALTIME_EXPORT_TYPE            /**< OSAL REAL TIME        */
-#define M4OSAL_SOCKET_EXPORT_TYPE            /**< SOCKET                */
-#define M4OSAL_STRING_EXPORT_TYPE            /**< OSAL STRING        */
-#define M4OSAL_URI_EXPORT_TYPE                /**< OSAL URI            */
-#define M4OSAL_MEMORY_EXPORT_TYPE            /**< OSAL MEMORY        */
-#define M4OSAL_TRACE_EXPORT_TYPE            /**< OSAL TRACE            */
-#define M4OSAL_TOOL_TIMER_EXPORT_TYPE        /**< OSAL TOOL TIMER    */
-#define M4OSAL_SYSTEM_CM_EXPORT_TYPE        /**< SYSTEM COMMON API    */
-#define M4OSAL_LINKED_LIST_EXPORT_TYPE        /**< TOOL LINKED LIST    */
-#define M4OSAL_MEMORY_MANAGER_EXPORT_TYPE    /**< MEMORY MANAGER        */
-#define M4OSAL_TRACE_MANAGER_EXPORT_TYPE    /**< TOOL TRACE MANAGER */
-#define M4VPS_EXPORT_TYPE                    /**< VPS API            */
-#define M4AP_EXPORT_TYPE                    /**< AUDIO PRESENTERS    */
-#define M4VP_EXPORT_TYPE                    /**< VIDEO PRESENTERS    */
-#define M4CB_EXPORT_TYPE                    /**< Call back            */
-
-#ifdef __cplusplus
-}
-#endif /*__cplusplus*/
-
-#endif /*M4OSA_EXPORT_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_FileCommon.h b/libvideoeditor/osal/inc/M4OSA_FileCommon.h
deleted file mode 100755
index f2afb8c..0000000
--- a/libvideoeditor/osal/inc/M4OSA_FileCommon.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileCommon.h
- * @ingroup      OSAL
- * @brief        File common
- * @note         This file declares functions and types used by both the file
- *               writer and file reader.
- ************************************************************************
-*/
-
-
-#ifndef M4OSA_FILECOMMON_H
-#define M4OSA_FILECOMMON_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Time.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_OptionID.h"
-
-
-typedef M4OSA_Int32 M4OSA_FilePosition;
-
-/** This enum defines the application mode access.
- *  ie, the application uses a file descriptor to read or to write  or
- *  both to read and write at the same time.
- *  This structure is used for MM project only. It enables to read and write to a file
- *  with one descriptor.
- */
-typedef enum
-{
-   M4OSA_kDescNoneAccess    = 0x00,
-   M4OSA_kDescReadAccess    = 0x01,    /** The Descriptor reads only from the file */
-   M4OSA_kDescWriteAccess    = 0x02,    /** The Descriptor writes only from the file*/
-   M4OSA_kDescRWAccess        = 0x03    /** The Descriptor reads and writes from/in the file*/
-} M4OSA_DescrModeAccess;
-
-
-/** This enum defines the file mode access. Both text mode as binary mode
-    cannot be set together.*/
-typedef enum
-{
-   /** The file must be accessed in read only mode*/
-   M4OSA_kFileRead             = 0x01,
-   /** The file must be accessed in write only mode*/
-   M4OSA_kFileWrite            = 0x02,
-   /** The file must be accessed in append mode (An existing file must
-       be available to append data)*/
-   M4OSA_kFileAppend           = 0x04,
-   /** If the file does not exist, it will be created*/
-   M4OSA_kFileCreate           = 0x08,
-   /** Data are processed as binary one, there is no data management*/
-   M4OSA_kFileIsTextMode       = 0x10
-} M4OSA_FileModeAccess;
-
-
-/** This type is used to store a date.*/
-typedef struct
-{
-   /** Time scale (tick number per second)*/
-   M4OSA_UInt32 timeScale;
-   /** Date expressed in the time scale*/
-   M4OSA_Time   time;
-   /** Year of the absolute time (1900, 1970 or 2000)*/
-   M4OSA_UInt32 referenceYear;
-} M4OSA_Date;
-
-
-/** This strucure defines the file attributes*/
-typedef struct
-{
-   /** The file mode access*/
-   M4OSA_FileModeAccess    modeAccess;
-   /** The creation date*/
-   M4OSA_Date              creationDate;
-   /** The last modification date*/
-   M4OSA_Date              modifiedDate;
-   /** The last access date (read)*/
-   M4OSA_Date              lastAccessDate;
-} M4OSA_FileAttribute;
-
-
-
-/** This enum defines the seek behavior*/
-typedef enum M4OSA_FileSeekAccessMode
-{
-   /** Relative to the beginning of the file*/
-   M4OSA_kFileSeekBeginning            = 0x01,
-   /** Relative to the end of the file*/
-   M4OSA_kFileSeekEnd                  = 0x02,
-   /** Relative to the current file position*/
-   M4OSA_kFileSeekCurrent              = 0x03
-} M4OSA_FileSeekAccessMode;
-
-
-/* Error codes */
-#define M4ERR_FILE_NOT_FOUND         M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000001)
-#define M4ERR_FILE_LOCKED            M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000002)
-#define M4ERR_FILE_BAD_MODE_ACCESS   M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000003)
-#define M4ERR_FILE_INVALID_POSITION  M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000004)
-
-
-#endif /*M4OSA_FILECOMMON_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_FileCommon_priv.h b/libvideoeditor/osal/inc/M4OSA_FileCommon_priv.h
deleted file mode 100755
index 1eba456..0000000
--- a/libvideoeditor/osal/inc/M4OSA_FileCommon_priv.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileCommon_priv.h
- * @ingroup      OSAL
- * @brief        File common private for Android
- * @note         This file declares functions and types used by both the file
- *               writer and file reader.
- ************************************************************************
-*/
-
-#ifndef M4OSA_FILECOMMON_PRIV_H
-#define M4OSA_FILECOMMON_PRIV_H
-
-
-#include "M4OSA_FileCommon.h"
-#include <stdio.h>
-
-#define M4OSA_isAccessModeActived(compound_mode_access,elementary_mode_access)\
-        (((compound_mode_access)&(elementary_mode_access))? 1:0)
-
-
-typedef enum M4OSA_LastSeek
-{
-   SeekNone,
-   SeekRead,
-   SeekWrite
-} M4OSA_LastSeek;
-
-/** This structure defines the file context*/
-typedef struct {
-   M4OSA_UInt32         coreID_read;
-   M4OSA_UInt32         coreID_write;
-   FILE*                file_desc;
-   /** The name of the URL */
-   M4OSA_Char*          url_name;
-   /** The name of the file */
-   M4OSA_Char*          file_name;
-   /** The size in bytes of the file */
-   M4OSA_FilePosition   file_size;
-   /** The file mode access used to open the file */
-   M4OSA_FileModeAccess access_mode;
-   M4OSA_LastSeek       current_seek;
-   M4OSA_FilePosition   read_position;
-   M4OSA_FilePosition   write_position;
-   M4OSA_Bool           b_is_end_of_file;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-   M4OSA_Context        semaphore_context;
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-
-   /* These two variables were added to manage case where a file
-    * is opened in read and write mode with one descriptor */
-    M4OSA_DescrModeAccess    m_DescrModeAccess;
-    M4OSA_UInt32            m_uiLockMode;
-
-
-} M4OSA_FileContext;
-
-
-
-M4OSA_ERR M4OSA_fileCommonOpen(M4OSA_UInt16 core_id,
-                               M4OSA_Context* context,
-                               M4OSA_Char* URL,
-                               M4OSA_FileModeAccess fileModeAccess);
-
-M4OSA_ERR M4OSA_fileCommonClose(M4OSA_UInt16 core_id,
-                                M4OSA_Context context);
-
-M4OSA_ERR M4OSA_fileCommonGetAttribute(M4OSA_Context context,
-                                       M4OSA_FileAttribute* attribute);
-
-M4OSA_ERR M4OSA_fileCommonGetURL(M4OSA_Context context,
-                                 M4OSA_Char** url);
-
-M4OSA_ERR M4OSA_fileCommonGetFilename(M4OSA_Char* url,
-                                      M4OSA_Char** filename);
-
-M4OSA_ERR M4OSA_fileCommonSeek(M4OSA_Context context,
-                               M4OSA_FileSeekAccessMode seekMode,
-                               M4OSA_FilePosition* position);
-
-#ifdef UTF_CONVERSION
-M4OSA_ERR M4OSA_ToUTF8_OSAL (M4OSA_Void   *pBufferIn,
-                             M4OSA_UInt8  *pBufferOut,
-                             M4OSA_UInt32 *bufferOutSize);
-#endif /*UTF_CONVERSION*/
-
-
-#endif /*M4OSA_FILECOMMON_PRIV_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_FileReader.h b/libvideoeditor/osal/inc/M4OSA_FileReader.h
deleted file mode 100755
index c22756d..0000000
--- a/libvideoeditor/osal/inc/M4OSA_FileReader.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileReader.h
- * @ingroup      OSAL
- * @brief        File reader
- * @note         This file declares functions and types to read a file.
- ************************************************************************
-*/
-
-
-#ifndef M4OSA_FILEREADER_H
-#define M4OSA_FILEREADER_H
-
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_FileCommon.h"
-#include "M4OSA_Memory.h"
-
-
-
-/** This enum defines the option ID to be used in M4OSA_FileReadGetOption()
-    and M4OSA_FileReadSetOption()*/
-typedef enum M4OSA_FileReadOptionID
-{
-   /** Get the file size (M4OSA_fpos*)*/
-   M4OSA_kFileReadGetFileSize
-                  = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x01),
-
-      /** Get the file attributes (M4OSA_FileAttribute*)*/
-   M4OSA_kFileReadGetFileAttribute
-                  = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x02),
-
-   /** Get the file URL, provided by the M4OSA_FileReadOpen (M4OSA_Char*)*/
-   M4OSA_kFileReadGetURL
-                  = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x03),
-
-   /** Get the file position (M4OSA_fpos*)*/
-   M4OSA_kFileReadGetFilePosition
-                  = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x04),
-
-   /** Check end of file: TRUE if the EOF has been reached, FALSE else
-       (M4OSA_Bool*)*/
-   M4OSA_kFileReadIsEOF
-                  = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x05),
-
-   /** Check lock of file */
-   M4OSA_kFileReadLockMode
-                  = M4OSA_OPTION_ID_CREATE(M4_READWRITE, M4OSA_FILE_READER, 0x06)
-
-} M4OSA_FileReadOptionID;
-
-
-
-
-
-/** This structure stores the set of the function pointer to access to a
-    file in read mode*/
-typedef struct
-{
-   M4OSA_ERR (*openRead)   (M4OSA_Context* context,
-                            M4OSA_Void* fileDescriptor,
-                            M4OSA_UInt32 fileModeAccess);
-
-   M4OSA_ERR (*readData)   (M4OSA_Context context,
-                            M4OSA_MemAddr8 buffer,
-                            M4OSA_UInt32* size);
-
-   M4OSA_ERR (*seek)       (M4OSA_Context context,
-                            M4OSA_FileSeekAccessMode seekMode,
-                            M4OSA_FilePosition* position);
-
-   M4OSA_ERR (*closeRead)  (M4OSA_Context context);
-
-   M4OSA_ERR (*setOption)  (M4OSA_Context context,
-                            M4OSA_FileReadOptionID optionID,
-                            M4OSA_DataOption optionValue);
-
-   M4OSA_ERR (*getOption)  (M4OSA_Context context,
-                            M4OSA_FileReadOptionID optionID,
-                            M4OSA_DataOption *optionValue);
-} M4OSA_FileReadPointer;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadOpen        (M4OSA_Context* context,
-                                     M4OSA_Void* fileDescriptor,
-                                     M4OSA_UInt32 fileModeAccess);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadData        (M4OSA_Context context,
-                                     M4OSA_MemAddr8 buffer,
-                                     M4OSA_UInt32* size);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadSeek        (M4OSA_Context context,
-                                     M4OSA_FileSeekAccessMode seekMode,
-                                     M4OSA_FilePosition* position);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadClose       (M4OSA_Context context);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadGetOption   (M4OSA_Context context,
-                                     M4OSA_FileReadOptionID optionID,
-                                     M4OSA_DataOption *optionValue);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadSetOption   (M4OSA_Context context,
-                                     M4OSA_FileReadOptionID optionID,
-                                     M4OSA_DataOption optionValue);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif   /*M4OSA_FILEREADER_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_FileReader_priv.h b/libvideoeditor/osal/inc/M4OSA_FileReader_priv.h
deleted file mode 100755
index 327b086..0000000
--- a/libvideoeditor/osal/inc/M4OSA_FileReader_priv.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileReader_priv.h
- * @ingroup      OSAL
- * @brief        File reader private for Android
- * @note
- ************************************************************************
-*/
-
-#ifndef M4OSA_FILEREADER_PRIV_H
-#define M4OSA_FILEREADER_PRIV_H
-
-
-/** Those define enable/disable option ID*/
-
-#define M4OSA_OPTIONID_FILE_READ_GET_FILE_SIZE               M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_READ_GET_FILE_ATTRIBUTE          M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_READ_GET_URL                     M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_READ_GET_FILE_POSITION           M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_READ_IS_EOF                      M4OSA_TRUE
-
-#endif /*M4OSA_FILEREADER_PRIV_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_FileWriter.h b/libvideoeditor/osal/inc/M4OSA_FileWriter.h
deleted file mode 100755
index 9a11331..0000000
--- a/libvideoeditor/osal/inc/M4OSA_FileWriter.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileWriter.h
- * @ingroup      OSAL
- * @brief        File writer
- * @note         This file declares functions and types to write in a file.
- ************************************************************************
-*/
-
-
-#ifndef M4OSA_FILEWRITER_H
-#define M4OSA_FILEWRITER_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_FileCommon.h"
-#include "M4OSA_Memory.h"
-
-
-/** This enum defines the option ID to be used in M4OSA_FileWriteGetOption()
-and M4OSA_FileWriteSetOption()*/
-typedef enum
-{
-   /** Get the file URL, provided by the M4OSA_FileWriteOpen (M4OSA_Char*)*/
-   M4OSA_kFileWriteGetURL
-               = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x01),
-
-   /** Get the file attributes (M4OSA_FileAttribute*)*/
-   M4OSA_kFileWriteGetAttribute
-               = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x02),
-
-   /** Get the reader context for read & write file. (M4OSA_Context*)*/
-   M4OSA_kFileWriteGetReaderContext
-               = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x03),
-
-   M4OSA_kFileWriteGetFilePosition
-               = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x04),
-
-   M4OSA_kFileWriteGetFileSize
-               = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x05),
-
-
-    M4OSA_kFileWriteLockMode
-               = M4OSA_OPTION_ID_CREATE(M4_READWRITE, M4OSA_FILE_WRITER, 0x06),
-
-
-   /** Check lock of file */
-   M4OSA_kFileWriteDescMode
-                = M4OSA_OPTION_ID_CREATE(M4_READWRITE, M4OSA_FILE_WRITER, 0x07)
-} M4OSA_FileWriteOptionID;
-
-
-/** This structure stores the set of the function pointer to access to a file
-    in read mode*/
-typedef struct
-{
-   M4OSA_ERR (*openWrite)   (M4OSA_Context* context,
-                             M4OSA_Void* fileDescriptor,
-                             M4OSA_UInt32 fileModeAccess);
-
-   M4OSA_ERR (*writeData)   (M4OSA_Context context,
-                             M4OSA_MemAddr8 data,
-                             M4OSA_UInt32 size);
-
-   M4OSA_ERR (*seek)        (M4OSA_Context context,
-                             M4OSA_FileSeekAccessMode seekMode,
-                             M4OSA_FilePosition* position);
-
-   M4OSA_ERR (*Flush)       (M4OSA_Context context);
-   M4OSA_ERR (*closeWrite)  (M4OSA_Context context);
-   M4OSA_ERR (*setOption)   (M4OSA_Context context,
-                             M4OSA_OptionID optionID,
-                             M4OSA_DataOption optionValue);
-
-   M4OSA_ERR (*getOption)   (M4OSA_Context context,
-                             M4OSA_OptionID optionID,
-                             M4OSA_DataOption* optionValue);
-} M4OSA_FileWriterPointer;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteOpen       (M4OSA_Context* context,
-                                     M4OSA_Void* fileDescriptor,
-                                     M4OSA_UInt32 fileModeAccess);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteData       (M4OSA_Context context,
-                                     M4OSA_MemAddr8 data,
-                                     M4OSA_UInt32 size);
-
-/* Pierre Lebeaupin 2008/04/29: WARNING! the feature of file*Seek which returns
-the position in the file (from the beginning) after the seek in the "position"
-pointer has been found to be unreliably (or sometimes not at all) implemented
-in some OSALs, so relying on it is strongly discouraged, unless you really want
-to have a pizza evening. */
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteSeek       (M4OSA_Context context,
-                                     M4OSA_FileSeekAccessMode seekMode,
-                                     M4OSA_FilePosition* position);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteClose      (M4OSA_Context context);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteFlush      (M4OSA_Context context);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteGetOption  (M4OSA_Context context,
-                                     M4OSA_OptionID optionID,
-                                     M4OSA_DataOption* optionValue);
-
-M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteSetOption  (M4OSA_Context context,
-                                     M4OSA_OptionID optionID,
-                                     M4OSA_DataOption optionValue);
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /*M4OSA_FILEWRITER_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_FileWriter_priv.h b/libvideoeditor/osal/inc/M4OSA_FileWriter_priv.h
deleted file mode 100755
index 9d972f4..0000000
--- a/libvideoeditor/osal/inc/M4OSA_FileWriter_priv.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileWriter_priv.h
- * @ingroup      OSAL
- * @brief        File writer private for Android
-************************************************************************
-*/
-
-#ifndef M4OSA_FILEWRITER_PRIV_H
-#define M4OSA_FILEWRITER_PRIV_H
-
-
-/** Those define enable/disable option ID*/
-
-#define M4OSA_OPTIONID_FILE_WRITE_GET_FILE_SIZE              M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_WRITE_GET_FILE_ATTRIBUTE         M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_WRITE_GET_READER_CONTEXT         M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_WRITE_GET_FILE_POSITION          M4OSA_TRUE
-#define M4OSA_OPTIONID_FILE_WRITE_GET_URL                    M4OSA_TRUE
-
-#endif /*M4OSA_FILEWRITER_PRIV_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Memory.h b/libvideoeditor/osal/inc/M4OSA_Memory.h
deleted file mode 100755
index a4d15cc..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Memory.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Memory.h
- * @ingroup      OSAL
- * @brief        Memory allocation
- * @note         This file defines function prototypes to allocate
- *               and free memory.
- ************************************************************************
-*/
-
-#ifndef M4OSA_MEMORY_H
-#define M4OSA_MEMORY_H
-
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h" /*for M4OSA_CoreID definition*/
-
-typedef M4OSA_Int32* M4OSA_MemAddr32;
-typedef M4OSA_Int8*  M4OSA_MemAddr8;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_MEMORY_EXPORT_TYPE extern M4OSA_MemAddr32 M4OSA_32bitAlignedMalloc (M4OSA_UInt32 size,
-                                                               M4OSA_CoreID coreID,
-                                                               M4OSA_Char* string);
-
-M4OSAL_MEMORY_EXPORT_TYPE extern M4OSA_ERR M4OSA_randInit(void);
-
-
-M4OSAL_MEMORY_EXPORT_TYPE extern M4OSA_ERR M4OSA_rand(M4OSA_Int32* out_value,
-                                                      M4OSA_UInt32 max_value);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Mutex.h b/libvideoeditor/osal/inc/M4OSA_Mutex.h
deleted file mode 100755
index d496bdd..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Mutex.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Mutex.h
- * @ingroup      OSAL
- * @brief        mutex API
- ************************************************************************
-*/
-
-
-#ifndef M4OSA_MUTEX_H
-#define M4OSA_MUTEX_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-
-#ifdef __cplusplus
-extern "C"
-{
-
-#endif
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexOpen(    M4OSA_Context* context );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexLock(    M4OSA_Context  context,
-                              M4OSA_UInt32   timeout );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexUnlock(  M4OSA_Context  context );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexClose(   M4OSA_Context  context );
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /*M4OSA_MUTEX_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_OptionID.h b/libvideoeditor/osal/inc/M4OSA_OptionID.h
deleted file mode 100755
index 61b9044..0000000
--- a/libvideoeditor/osal/inc/M4OSA_OptionID.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_OptionID.h
- * @ingroup      OSAL
- * @brief        Option ID macros
- * @note         This file defines macros to generate and analyze option ID.
- *               Option ID is used by M4YYY_ZZsetOption() and
- *               M4YYY_ZZgetOption() functions.
- ************************************************************************
-*/
-
-#ifndef M4OSA_OPTIONID_H
-#define M4OSA_OPTIONID_H
-
-
-#include "M4OSA_Types.h"
-
-/** M4OSA_OptionID is a 32 bits unsigned integer.
-- Right access (2 bits): Some options may have read only, write only or read
-  and write access
-- Core ID (14 bits): It is a unique ID for each core component
-- SubOption ID (16 bits): To select which option in a specific core component
-*/
-typedef M4OSA_UInt32 M4OSA_OptionID;
-typedef void*        M4OSA_DataOption;
-
-#define M4_READ      0x01
-#define M4_WRITE     0x02
-#define M4_READWRITE 0x03
-
-/* Macro to process M4OSA_OptionID */
-
-/** This macro creates an optionID given read/write access,
-    coreID and SubOptionID*/
-#define M4OSA_OPTION_ID_CREATE(right, coreID, errorID)\
-   (M4OSA_Int32)((((((M4OSA_UInt32)right)&0x03)<<30))+((((M4OSA_UInt32)coreID)&0x003FFF)<<16)+(((M4OSA_UInt32)errorID)&0x00FFFF))
-
-/** This macro splits an optionID into read/write access,
-    coreID and SubOptionID*/
-#define M4OSA_OPTION_ID_SPLIT(optionID, right, coreID, errorID)\
-   { right=(M4OSA_UInt8)((optionID)>>30);\
-     coreID=(M4OSA_UInt16)(((optionID)>>16)&0x00003FFF);\
-     errorID=(M4OSA_UInt32)((optionID)&0x0000FFFF); }
-
-/** This macro returns 1 if the optionID is writable, 0 otherwise*/
-#define M4OSA_OPTION_ID_IS_WRITABLE(optionID) ((((optionID)>>30)&M4_WRITE)!=0)
-
-/** This macro returns 1 if the optionID is readable, 0 otherwise*/
-#define M4OSA_OPTION_ID_IS_READABLE(optionID) ((((optionID)>>30)&M4_READ)!=0)
-
-/** This macro returns 1 if the optionID has its core ID equal to 'coreID', 0 otherwise*/
-#define M4OSA_OPTION_ID_IS_COREID(optionID, coreID)\
-   (((((optionID)>>16)&0x003FFF) == (coreID)) ? M4OSA_TRUE:M4OSA_FALSE)
-
-
-#endif   /*M4OSA_OPTIONID_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Semaphore.h b/libvideoeditor/osal/inc/M4OSA_Semaphore.h
deleted file mode 100755
index 2630454..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Semaphore.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Semaphore.h
- * @ingroup      OSAL
- * @brief        semaphore API
- ************************************************************************
-*/
-
-#ifndef M4OSA_SEMAPHORE_H
-#define M4OSA_SEMAPHORE_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphoreOpen(  M4OSA_Context* context,
-                                M4OSA_UInt32   initialNumber );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphorePost(  M4OSA_Context  context );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphoreWait(  M4OSA_Context  context,
-                                M4OSA_Int32    timeout );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphoreClose( M4OSA_Context  context );
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /*M4OSA_SEMAPHORE_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Thread.h b/libvideoeditor/osal/inc/M4OSA_Thread.h
deleted file mode 100755
index ca96afb..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Thread.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Thread.h
- * @ingroup      OSAL
- * @brief        thread API
- ************************************************************************
-*/
-
-
-#ifndef M4OSA_THREAD_H
-#define M4OSA_THREAD_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_OptionID.h"
-
-
-/* Definition of common error codes */
-#define M4ERR_THREAD_NOT_STARTED M4OSA_ERR_CREATE(M4_ERR,M4OSA_THREAD,0x000001)
-
-
-typedef enum
-{
-   M4OSA_kThreadOpened   = 0x100,
-   M4OSA_kThreadStarting = 0x200,
-   M4OSA_kThreadRunning  = 0x300,
-   M4OSA_kThreadStopping = 0x400,
-   M4OSA_kThreadClosed  = 0x500
-} M4OSA_ThreadState;
-
-
-
-typedef enum
-{
-   M4OSA_kThreadHighestPriority  =  0x000,
-   M4OSA_kThreadHighPriority     =  0x100,
-   M4OSA_kThreadNormalPriority   =  0x200,
-   M4OSA_kThreadLowPriority      =  0x300,
-   M4OSA_kThreadLowestPriority   =  0x400
-} M4OSA_ThreadPriorityLevel;
-
-
-
-typedef enum
-{
-   M4OSA_ThreadStarted
-      = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x01),
-
-   M4OSA_ThreadStopped
-      = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x02),
-
-   M4OSA_ThreadPriority
-      = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x03),
-
-   M4OSA_ThreadName
-      = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x04),
-
-   M4OSA_ThreadStackSize
-      = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x05),
-
-   M4OSA_ThreadUserData
-      = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x06)
-
-} M4OSA_ThreadOptionID;
-
-
-
-typedef M4OSA_ERR  (*M4OSA_ThreadDoIt)(M4OSA_Void*);
-typedef M4OSA_Void (*M4OSA_ThreadCallBack)(M4OSA_Context, M4OSA_Void*);
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncOpen(     M4OSA_Context*        context,
-                                    M4OSA_ThreadDoIt      func );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncStart(    M4OSA_Context         context,
-                                    M4OSA_Void*           param );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncStop(     M4OSA_Context         context );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncClose(    M4OSA_Context         context );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncGetState( M4OSA_Context         context,
-                                    M4OSA_ThreadState*    state );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSleep(        M4OSA_UInt32          time );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncSetOption(M4OSA_Context         context,
-                                    M4OSA_ThreadOptionID  option,
-                                    M4OSA_DataOption      value );
-
-
-M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncGetOption(M4OSA_Context         context,
-                                    M4OSA_ThreadOptionID  option,
-                                    M4OSA_DataOption*     value );
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /*M4OSA_THREAD_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Thread_priv.h b/libvideoeditor/osal/inc/M4OSA_Thread_priv.h
deleted file mode 100755
index b424b05..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Thread_priv.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Thread_priv.h
- * @ingroup      OSAL
- * @brief        Thread private for Android
- * @note
- ************************************************************************
-*/
-
-#ifndef M4OSA_THREAD_PRIV_H
-#define M4OSA_THREAD_PRIV_H
-
-
-#include "M4OSA_Types.h"
-
-
-/* Context for the thread */
-typedef struct M4OSA_ThreadContext {
-   M4OSA_UInt32 coreID;                /* thread context identifiant */
-   pthread_t threadID;                 /* thread identifier. */
-   M4OSA_Char* name;                   /* thread name */
-   M4OSA_UInt32 stackSize;             /* thread stackSize in bytes */
-   M4OSA_ThreadDoIt func;              /* thread function */
-   M4OSA_Void* param;                  /* thread parameter */
-/*
-   M4OSA_Void* userData;               / * thread user data * /
-*/
-   M4OSA_ThreadState state;            /* thread automaton state */
-   M4OSA_Context stateMutex;           /* mutex for thread state management */
-/*
-   M4OSA_ThreadCallBack startCallBack; / * starting thread call back * /
-   M4OSA_ThreadCallBack stopCallBack;  / * stopping thread call back * /
-*/
-   M4OSA_Context semStartStop;         /* semaphore for start and stop do_it */
-   M4OSA_ThreadPriorityLevel priority; /* thread priority level */
-} M4OSA_ThreadContext ;
-
-
-/** Those define enable/disable option ID*/
-#define M4OSA_OPTIONID_THREAD_STARTED           M4OSA_TRUE
-#define M4OSA_OPTIONID_THREAD_STOPPED           M4OSA_TRUE
-#define M4OSA_OPTIONID_THREAD_PRIORITY          M4OSA_TRUE
-#define M4OSA_OPTIONID_THREAD_STACK_SIZE        M4OSA_TRUE
-#define M4OSA_OPTIONID_THREAD_NAME              M4OSA_TRUE
-#define M4OSA_OPTIONID_THREAD_USER_DATA         M4OSA_TRUE
-
-#endif /*M4OSA_THREAD_PRIV_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Time.h b/libvideoeditor/osal/inc/M4OSA_Time.h
deleted file mode 100755
index 21f25ed..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Time.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Time.h
- * @ingroup      OSAL
- * @brief        Time macros
- * @note         This file defines time type and associated macros which must
- *               be used to manipulate time.
- ************************************************************************
-*/
-
-/* $Id: M4OSA_Time.h,v 1.2 2007/01/05 13:12:22 thenault Exp $ */
-
-#ifndef M4OSA_TIME_H
-#define M4OSA_TIME_H
-
-
-#include "M4OSA_Types.h"
-
-
-typedef signed long long  M4OSA_Time;
-
-
-/** This macro sets the unknown time value */
-
-#define M4OSA_TIME_UNKNOWN 0x80000000
-
-/** This macro converts a time with a time scale to millisecond.
-    The result is a M4OSA_Double*/
-#define M4OSA_TIME_TO_MS(result, time, timescale)\
-      { result = (1000*(M4OSA_Double)time)/((M4OSA_Double)timescale); }
-
-#endif /*M4OSA_TIME_H*/
-
diff --git a/libvideoeditor/osal/inc/M4OSA_Types.h b/libvideoeditor/osal/inc/M4OSA_Types.h
deleted file mode 100755
index ee258a0..0000000
--- a/libvideoeditor/osal/inc/M4OSA_Types.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Types.h
- * @ingroup      OSAL
- * @brief        Abstraction types for Android
- * @note         This file redefines basic types which must be
- *               used to declare any variable.
-************************************************************************
-*/
-
-
-#ifndef M4OSA_TYPES_H
-#define M4OSA_TYPES_H
-
-#include <ctype.h>
-#include <stdio.h>
-#include <string.h>
-#include "M4OSA_Export.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-typedef int8_t     M4OSA_Bool;
-typedef uint8_t    M4OSA_UInt8;
-typedef int8_t     M4OSA_Int8;
-typedef uint16_t   M4OSA_UInt16;
-typedef int16_t    M4OSA_Int16;
-typedef uint32_t   M4OSA_UInt32;
-typedef int32_t    M4OSA_Int32;
-
-typedef signed char     M4OSA_Char;
-typedef unsigned char   M4OSA_UChar;
-
-typedef double          M4OSA_Double;
-typedef float           M4OSA_Float;
-
-typedef unsigned char   M4OSA_WChar;
-
-typedef void            M4OSA_Void;
-
-/* Min & max definitions*/
-#define M4OSA_UINT8_MIN                  0
-#define M4OSA_UINT8_MAX                255
-
-#define M4OSA_UINT16_MIN                 0
-#define M4OSA_UINT16_MAX             65535
-
-#define M4OSA_UINT32_MIN                 0
-#define M4OSA_UINT32_MAX        0xFFFFFFFF
-
-#define M4OSA_INT8_MIN                -128
-#define M4OSA_INT8_MAX                 127
-
-#define M4OSA_INT16_MIN             -32768
-#define M4OSA_INT16_MAX              32767
-
-#define M4OSA_INT32_MIN       (-0x7FFFFFFF-1)
-#define M4OSA_INT32_MAX         0x7FFFFFFF
-
-#define M4OSA_CHAR_MIN                -128
-#define M4OSA_CHAR_MAX                 127
-
-#define M4OSA_UCHAR_MIN                  0
-#define M4OSA_UCHAR_MAX                255
-
-#define M4OSA_NULL                     0x00
-#define M4OSA_TRUE                     0x01
-#define M4OSA_FALSE                    0x00
-#define M4OSA_WAIT_FOREVER       0xffffffff
-
-#define M4OSA_CONST                   const
-#define M4OSA_INLINE                 inline
-
-/* Rollover offset of the clock */
-/* This value must be the one of M4OSA_clockGetTime */
-#define M4OSA_CLOCK_ROLLOVER           M4OSA_INT32_MAX
-
-typedef void*                M4OSA_Context;
-
-/** It is a unique ID for each core component*/
-typedef  M4OSA_UInt16 M4OSA_CoreID;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*M4OSA_TYPES_H*/
-
diff --git a/libvideoeditor/osal/src/Android.mk b/libvideoeditor/osal/src/Android.mk
deleted file mode 100755
index 4f38b0c..0000000
--- a/libvideoeditor/osal/src/Android.mk
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libvideoeditor_osal
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE:= libvideoeditor_osal
-
-LOCAL_SRC_FILES:=          \
-    M4OSA_CharStar.c \
-    M4OSA_Clock.c \
-    M4OSA_FileCommon.c \
-    M4OSA_FileReader.c \
-    M4OSA_FileWriter.c \
-    M4OSA_Mutex.c \
-    M4OSA_Random.c \
-    M4OSA_Semaphore.c \
-    M4OSA_Thread.c \
-    M4PSW_DebugTrace.c \
-    M4PSW_MemoryInterface.c \
-    M4PSW_Trace.c \
-    LVOSA_FileReader_optim.c
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES := libcutils libutils liblog
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-
-LOCAL_SHARED_LIBRARIES += libdl
-
-# All of the shared libraries we link against.
-LOCAL_LDLIBS := \
-    -lpthread -ldl
-
-LOCAL_CFLAGS += -Wno-multichar \
-    -D__ANDROID__ \
-    -DM4OSA_FILE_BLOCK_WITH_SEMAPHORE \
-    -DUSE_STAGEFRIGHT_CODECS \
-    -DUSE_STAGEFRIGHT_AUDIODEC \
-    -DUSE_STAGEFRIGHT_VIDEODEC \
-    -DUSE_STAGEFRIGHT_AUDIOENC \
-    -DUSE_STAGEFRIGHT_VIDEOENC \
-    -DUSE_STAGEFRIGHT_READERS \
-    -DUSE_STAGEFRIGHT_3GPP_READER
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/libvideoeditor/osal/src/LVOSA_FileReader_optim.c b/libvideoeditor/osal/src/LVOSA_FileReader_optim.c
deleted file mode 100755
index e11e008..0000000
--- a/libvideoeditor/osal/src/LVOSA_FileReader_optim.c
+++ /dev/null
@@ -1,1052 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file         M4OSA_FileReader_optim.c
- * @brief
- * @note         This file implements functions to manipulate filesystem access
- ******************************************************************************
-*/
-
-/** Addition of Trace ID **/
-#include "M4OSA_CoreID.h"
-#include "M4OSA_Error.h"
-
-#ifdef M4TRACE_ID
-#undef M4TRACE_ID
-#endif
-#define M4TRACE_ID    M4OSA_FILE_READER
-
-
-#include "M4OSA_FileCommon.h"
-#include "M4OSA_FileReader.h"
-#include "M4OSA_FileWriter.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-
-#include "LVOSA_FileReader_optim.h"
-
-#define M4OSA_READER_OPTIM_USE_OSAL_IF
-#ifndef M4OSA_READER_OPTIM_USE_OSAL_IF
-    #include "M4OSA_FileAccess.h"
-#endif
-
-#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL) return (retval);
-
-
-
-
-/**
- ******************************************************************************
- * File reader cache buffers parameters (size, number of buffers, etc)
- ******************************************************************************
-*/
-#define M4OSA_READBUFFER_SIZE    1024*16
-#define M4OSA_READBUFFER_NB        2
-#define M4OSA_READBUFFER_NONE    -1
-#define M4OSA_EOF               -1
-
-#define MAX_FILLS_SINCE_LAST_ACCESS    M4OSA_READBUFFER_NB*2
-
-/**
- ******************************************************************************
- * structure    M4OSA_FileReader_Buffer
- * @brief       This structure defines the File reader Buffers context (private)
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_MemAddr8      data;        /**< buffer data */
-    M4OSA_FilePosition  size;        /**< size of the buffer */
-    M4OSA_FilePosition  filepos;    /**< position in the file where the buffer starts */
-    M4OSA_FilePosition  remain;        /**< data amount not already copied from buffer */
-    M4OSA_UInt32        nbFillSinceLastAcess;    /**< To know since how many time we didn't use this buffer */
-} M4OSA_FileReader_Buffer_optim;
-
-/**
- ******************************************************************************
- * structure    M4OSA_FileReader_Context
- * @brief       This structure defines the File reader context (private)
- * @note        This structure is used for all File Reader calls to store the context
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Bool              IsOpened;       /**< Micro state machine */
-    M4OSA_FileAttribute     FileAttribute;  /**< Opening mode */
-    M4OSA_FilePosition         readFilePos;    /**< Effective position of the GFL read pointer */
-    M4OSA_FilePosition         absolutePos;    /**< Virtual position for next reading */
-    M4OSA_FilePosition         fileSize;        /**< Size of the file */
-
-    M4OSA_FileReader_Buffer_optim buffer[M4OSA_READBUFFER_NB];  /**< Read buffers */
-
-    M4OSA_Void*             aFileDesc;  /**< File descriptor */
-
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_FileReadPointer*     FS;            /**< Filesystem interface */
-#else
-    M4OSA_FileSystem_FctPtr *FS;        /**< Filesystem interface */
-#endif
-
-} M4OSA_FileReader_Context_optim;
-
-/* __________________________________________________________ */
-/*|                                                          |*/
-/*|    Global function for handling low level read access    |*/
-/*|__________________________________________________________|*/
-
-static M4OSA_FileReadPointer* gv_NXPSW_READOPT_lowLevelFunctions;
-
-M4OSA_ERR NXPSW_FileReaderOptim_init(M4OSA_Void *lowLevel_functionPointers, M4OSA_Void *optimized_functionPointers)
-{
-    M4OSA_FileReadPointer* lowLevel_fp  = (M4OSA_FileReadPointer*) lowLevel_functionPointers;
-    M4OSA_FileReadPointer* optimized_fp = (M4OSA_FileReadPointer*) optimized_functionPointers;
-
-    //Set the optimized functions, to be called by the user
-    optimized_fp->openRead  = M4OSA_fileReadOpen_optim;
-    optimized_fp->readData  = M4OSA_fileReadData_optim;
-    optimized_fp->seek      = M4OSA_fileReadSeek_optim;
-    optimized_fp->closeRead = M4OSA_fileReadClose_optim;
-    optimized_fp->setOption = M4OSA_fileReadSetOption_optim;
-    optimized_fp->getOption = M4OSA_fileReadGetOption_optim;
-
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR NXPSW_FileReaderOptim_cleanUp()
-{
-
-    gv_NXPSW_READOPT_lowLevelFunctions = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-
-M4OSA_ERR NXPSW_FileReaderOptim_getLowLevelFunctions(M4OSA_Void **FS)
-{
-    M4OSA_FileReadPointer** pFunctionsPointer = (M4OSA_FileReadPointer**) FS;
-    *pFunctionsPointer = gv_NXPSW_READOPT_lowLevelFunctions;
-    return M4NO_ERROR;
-}
-
-
-/* __________________________________________________________ */
-/*|                                                          |*/
-/*|        Buffer handling functions for Read access         |*/
-/*|__________________________________________________________|*/
-
-/**************************************************************/
-M4OSA_ERR M4OSA_FileReader_BufferInit(M4OSA_FileReader_Context_optim* apContext)
-/**************************************************************/
-{
-    M4OSA_UInt8 i;
-
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        apContext->buffer[i].data = M4OSA_NULL;
-        apContext->buffer[i].size = 0;
-        apContext->buffer[i].filepos = 0;
-        apContext->buffer[i].remain = 0;
-    }
-
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        apContext->buffer[i].data = (M4OSA_MemAddr8) M4OSA_32bitAlignedMalloc(M4OSA_READBUFFER_SIZE, 
-            M4OSA_FILE_READER, (M4OSA_Char *)"M4OSA_FileReader_BufferInit");
-        M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, apContext->buffer[i].data);
-    }
-
-    return M4NO_ERROR;
-}
-
-/**************************************************************/
-M4OSA_Void M4OSA_FileReader_BufferFree(M4OSA_FileReader_Context_optim* apContext)
-/**************************************************************/
-{
-    M4OSA_Int8 i;
-
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-        if(apContext->buffer[i].data != M4OSA_NULL)
-            free(apContext->buffer[i].data);
-}
-
-/**************************************************************/
-M4OSA_FilePosition M4OSA_FileReader_BufferCopy(M4OSA_FileReader_Context_optim* apContext,
-                                               M4OSA_Int8 i, M4OSA_FilePosition pos,
-                                               M4OSA_FilePosition size, M4OSA_MemAddr8 pData)
-/**************************************************************/
-{
-    M4OSA_FilePosition copysize;
-    M4OSA_FilePosition offset;
-
-    if(apContext->buffer[i].size == M4OSA_EOF) return M4OSA_EOF;
-
-    if(   (pos < apContext->buffer[i].filepos)
-       || (pos > (apContext->buffer[i].filepos + apContext->buffer[i].size - 1)) )
-    {
-        return 0; /* nothing copied */
-    }
-
-    offset = pos - apContext->buffer[i].filepos;
-
-    copysize = apContext->buffer[i].size - offset;
-    copysize = (size < copysize) ? size : copysize;
-
-    memcpy((void *)pData, (void *)(apContext->buffer[i].data + offset), copysize);
-
-    apContext->buffer[i].remain -= copysize;
-    apContext->buffer[i].nbFillSinceLastAcess = 0;
-
-    return copysize;
-}
-
-/**************************************************************/
-M4OSA_ERR M4OSA_FileReader_BufferFill(M4OSA_FileReader_Context_optim* apContext,
-                                       M4OSA_Int8 i, M4OSA_FilePosition pos)
-/**************************************************************/
-{
-    M4OSA_FilePosition     gridPos;
-    M4OSA_FilePosition    tempPos;
-    M4OSA_UInt32        bufferSize;
-    M4OSA_FilePosition     diff;
-    M4OSA_FilePosition     size;
-    M4OSA_ERR             err = M4NO_ERROR;
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_ERR             errno = M4NO_ERROR;
-    M4OSA_UInt32         fileReadSize = 0;
-    M4OSA_FilePosition     fileSeekPosition = 0;
-#else
-    M4OSA_Int32         ret_val;
-    M4OSA_UInt16         errno;
-#endif
-
-    M4OSA_TRACE3_4("BufferFill  i = %d  pos = %ld  read = %ld  old = %ld", i, pos,
-                              apContext->readFilePos, apContext->buffer[i].filepos);
-
-    /* Avoid cycling statement because of EOF */
-    if(pos >= apContext->fileSize)
-        return M4WAR_NO_MORE_AU;
-
-    /* Relocate to absolute postion if necessary */
-    bufferSize = M4OSA_READBUFFER_SIZE;
-    tempPos = (M4OSA_FilePosition) (pos / bufferSize);
-    gridPos = tempPos * M4OSA_READBUFFER_SIZE;
-    diff = gridPos - apContext->readFilePos;
-
-    if(diff != 0)
-    {
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-        fileSeekPosition = diff;
-        errno = apContext->FS->seek(apContext->aFileDesc, M4OSA_kFileSeekCurrent,
-                                    &fileSeekPosition);
-        apContext->readFilePos = gridPos;
-
-        if(M4NO_ERROR != errno)
-        {
-            err = errno;
-            M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR1 = 0x%x", err);
-            return err;
-        }
-
-#else
-        ret_val = apContext->FS->pFctPtr_Seek(apContext->aFileDesc, diff,
-                                               M4OSA_kFileSeekCurrent, &errno);
-        apContext->readFilePos = gridPos;
-
-        if(ret_val != 0)
-        {
-            err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
-            M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR1 = 0x%x", err);
-            return err;
-        }
-#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
-    }
-
-    apContext->buffer[i].filepos = apContext->readFilePos;
-
-    /* Read Data */
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    fileReadSize = M4OSA_READBUFFER_SIZE;
-    errno = apContext->FS->readData(apContext->aFileDesc,
-                      (M4OSA_MemAddr8)apContext->buffer[i].data, &fileReadSize);
-
-    size = (M4OSA_FilePosition)fileReadSize;
-    if ((M4NO_ERROR != errno)&&(M4WAR_NO_DATA_YET != errno))
-    {
-        apContext->buffer[i].size = M4OSA_EOF;
-        apContext->buffer[i].remain = 0;
-
-        err = errno;
-        M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR2 = 0x%x", err);
-        return err;
-    }
-#else
-    size = apContext->FS->pFctPtr_Read(apContext->aFileDesc,
-        (M4OSA_UInt8 *)apContext->buffer[i].data, M4OSA_READBUFFER_SIZE, &errno);
-    if(size == -1)
-    {
-        apContext->buffer[i].size = M4OSA_EOF;
-        apContext->buffer[i].remain = 0;
-
-        err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
-        M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR2 = 0x%x", err);
-        return err;
-    }
-#endif
-
-    apContext->buffer[i].size = size;
-    apContext->buffer[i].remain = size;
-    apContext->buffer[i].nbFillSinceLastAcess = 0;
-
-    /* Retrieve current position */
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    errno = apContext->FS->getOption(apContext->aFileDesc,
-                                     M4OSA_kFileReadGetFilePosition,
-                                     (M4OSA_DataOption*) &apContext->readFilePos);
-
-    if (M4NO_ERROR != errno)
-    {
-        err = errno;
-        M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR3 = 0x%x", err);
-    }
-    else if(   (apContext->buffer[i].size >= 0)
-       && (apContext->buffer[i].size < M4OSA_READBUFFER_SIZE) )
-    {
-        err = M4WAR_NO_DATA_YET;
-        M4OSA_TRACE2_0("M4OSA_FileReader_BufferFill returns NO DATA YET");
-        return err;
-    }
-#else
-    apContext->readFilePos = apContext->FS->pFctPtr_Tell(apContext->aFileDesc, &errno);
-
-    if(   (apContext->buffer[i].size >= 0)
-       && (apContext->buffer[i].size < M4OSA_READBUFFER_SIZE) )
-    {
-        err = M4WAR_NO_DATA_YET;
-        M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR3 = 0x%x", err);
-        return err;
-    }
-#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
-
-    /* Return without error */
-    return M4NO_ERROR;
-}
-
-/**************************************************************/
-M4OSA_Int8 M4OSA_FileReader_BufferMatch(M4OSA_FileReader_Context_optim* apContext,
-                                        M4OSA_FilePosition pos)
-/**************************************************************/
-{
-    M4OSA_Int8 i;
-
-
-    /* Select the buffer which matches with given pos */
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        if(   (pos >= apContext->buffer[i].filepos)
-           && (pos < (apContext->buffer[i].filepos + apContext->buffer[i].size)) )
-        {
-            return i;
-        }
-    }
-    return M4OSA_READBUFFER_NONE;
-}
-
-/**************************************************************/
-M4OSA_Int8 M4OSA_FileReader_BufferSelect(M4OSA_FileReader_Context_optim* apContext,
-                                         M4OSA_Int8 current_i)
-/**************************************************************/
-{
-    M4OSA_Int8 i,j;
-    M4OSA_FilePosition min_amount,max_amount;
-    M4OSA_Int8 min_i,max_count;
-
-    /* update nbFillSinceLastAcess field */
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        apContext->buffer[i].nbFillSinceLastAcess ++;
-    }
-
-    /* Plan A : Scan for empty buffer */
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        if(apContext->buffer[i].remain == 0)
-        {
-            return i;
-        }
-    }
-
-    max_count = M4OSA_READBUFFER_NB;
-    max_amount = MAX_FILLS_SINCE_LAST_ACCESS;
-
-    /* Plan B : Scan for dead buffer */
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        if(apContext->buffer[i].nbFillSinceLastAcess >= (M4OSA_UInt32) max_amount)
-        {
-            max_amount = apContext->buffer[i].nbFillSinceLastAcess;
-            max_count = i;
-        }
-    }
-    if(max_count<M4OSA_READBUFFER_NB)
-    {
-        M4OSA_TRACE2_2("DEAD BUFFER: %d, %d",max_count,apContext->buffer[max_count].nbFillSinceLastAcess);
-        return max_count;
-    }
-
-    min_i = current_i;
-    min_amount = M4OSA_READBUFFER_SIZE;
-
-    /* Select the buffer which is the most "empty" */
-    for(i=0; i<M4OSA_READBUFFER_NB; i++)
-    {
-        j = (i+current_i)%M4OSA_READBUFFER_NB;
-
-        if(apContext->buffer[j].remain < min_amount)
-        {
-            min_amount = apContext->buffer[j].remain;
-            min_i = j;
-        }
-    }
-
-    return min_i;
-
-}
-
-/**************************************************************/
-M4OSA_ERR M4OSA_FileReader_CalculateSize(M4OSA_FileReader_Context_optim* apContext)
-/**************************************************************/
-{
-    M4OSA_ERR            err = M4NO_ERROR;
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_ERR            errno = M4NO_ERROR;
-#else
-    M4OSA_Int32          ret_val;
-    M4OSA_UInt16         errno;
-#endif
-
-    /* go to the end of file*/
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    errno = apContext->FS->getOption(apContext->aFileDesc, M4OSA_kFileReadGetFileSize,
-                                        (M4OSA_DataOption*) &apContext->fileSize);
-    if (M4NO_ERROR != errno)
-    {
-        err = errno;
-        M4OSA_TRACE1_1("M4OSA_FileReader_CalculateSize ERR = 0x%x", err);
-    }
-#else
-    ret_val = apContext->FS->pFctPtr_Seek(apContext->aFileDesc, 0, M4OSA_kFileSeekEnd, &errno);
-
-    if (ret_val != 0)
-    {
-        apContext->readFilePos = M4OSA_EOF;
-        err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
-        M4OSA_TRACE1_1("M4OSA_FileReader_CalculateSize ERR = 0x%x", err);
-    }
-    else
-    {
-        /* Retrieve size of the file */
-        apContext->fileSize = apContext->FS->pFctPtr_Tell(apContext->aFileDesc, &errno);
-        apContext->readFilePos = apContext->fileSize;
-    }
-#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
-
-    return err;
-}
-
-
-/* __________________________________________________________ */
-/*|                                                          |*/
-/*|                   OSAL filesystem API                    |*/
-/*|__________________________________________________________|*/
-
-/**
-******************************************************************************
-* @brief       This method opens the provided fileDescriptor and returns its context.
-* @param       pContext:       (OUT) File reader context.
-* @param       pFileDescriptor :       (IN) File Descriptor of the input file.
-* @param       FileModeAccess :        (IN) File mode access.
-* @return      M4NO_ERROR: there is no error
-* @return      M4ERR_PARAMETER pContext or fileDescriptor is NULL
-* @return      M4ERR_ALLOC     there is no more memory available
-* @return      M4ERR_FILE_BAD_MODE_ACCESS      the file mode access is not correct (it must be either isTextMode or read)
-* @return      M4ERR_FILE_NOT_FOUND The file can not be opened.
-******************************************************************************
-*/
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_ERR M4OSA_fileReadOpen_optim(M4OSA_Context* pContext,
-                                       M4OSA_Void* pFileDescriptor,
-                                       M4OSA_UInt32 FileModeAccess)
-#else
-    M4OSA_ERR M4OSA_fileReadOpen_optim(M4OSA_Context* pContext,
-                                       M4OSA_Void* pFileDescriptor,
-                                       M4OSA_UInt32 FileModeAccess,
-                                       M4OSA_FileSystem_FctPtr *FS)
-#endif
-{
-    M4OSA_FileReader_Context_optim* apContext = M4OSA_NULL;
-
-    M4OSA_ERR   err       = M4NO_ERROR;
-    M4OSA_Void* aFileDesc = M4OSA_NULL;
-    M4OSA_Bool  buffers_allocated = M4OSA_FALSE;
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_ERR errno = M4NO_ERROR;
-#else
-    M4OSA_UInt16 errno;
-#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
-
-    M4OSA_TRACE2_3("M4OSA_fileReadOpen_optim p = 0x%p fd = %s mode = %lu", pContext,
-                                                   pFileDescriptor, FileModeAccess);
-
-    /*      Check input parameters */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pFileDescriptor);
-
-    *pContext = M4OSA_NULL;
-
-    /*      Allocate memory for the File reader context. */
-    apContext = (M4OSA_FileReader_Context_optim *)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_FileReader_Context_optim),
-                                      M4OSA_FILE_READER, (M4OSA_Char *)"M4OSA_FileReader_Context_optim");
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, apContext);
-
-    /* Set filesystem interface */
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-
-    /*Set the optimized functions, to be called by the user*/
-
-    apContext->FS = (M4OSA_FileReadPointer*) M4OSA_32bitAlignedMalloc(sizeof(M4OSA_FileReadPointer),
-                                       M4OSA_FILE_READER, (M4OSA_Char *)"M4OSA_FileReaderOptim_init");
-    if (M4OSA_NULL==apContext->FS)
-    {
-        M4OSA_TRACE1_0("M4OSA_FileReaderOptim_init - ERROR : allocation failed");
-        return M4ERR_ALLOC;
-    }
-    apContext->FS->openRead  = M4OSA_fileReadOpen;
-    apContext->FS->readData  = M4OSA_fileReadData;
-    apContext->FS->seek      = M4OSA_fileReadSeek;
-    apContext->FS->closeRead = M4OSA_fileReadClose;
-    apContext->FS->setOption = M4OSA_fileReadSetOption;
-    apContext->FS->getOption = M4OSA_fileReadGetOption;
-#else
-    apContext->FS = FS;
-#endif
-
-    /* Verify access mode */
-    if (   ((FileModeAccess & M4OSA_kFileAppend) != 0)
-        || ((FileModeAccess & M4OSA_kFileRead) == 0))
-    {
-        err = M4ERR_FILE_BAD_MODE_ACCESS;
-        goto cleanup;
-    }
-
-    /* Open file in read mode */
-    if((FileModeAccess & M4OSA_kFileCreate) != 0)
-    {
-        err = M4ERR_FILE_BAD_MODE_ACCESS;
-    }
-    else
-    {
-        if ((FileModeAccess & M4OSA_kFileRead))
-        {
-            /* File is opened in read only*/
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-            errno = apContext->FS->openRead(&aFileDesc, pFileDescriptor, FileModeAccess);
-
-            if ((aFileDesc == M4OSA_NULL)||(M4NO_ERROR != errno))
-            {
-                /* converts the error to PSW format*/
-                err = errno;
-                M4OSA_TRACE2_1("M4OSA_fileReadOpen_optim ERR1 = 0x%x", err);
-                apContext->IsOpened = M4OSA_FALSE;
-            }
-#else
-            aFileDesc = apContext->FS->pFctPtr_Open(pFileDescriptor, FileModeAccess, &errno);
-
-            if (aFileDesc == M4OSA_NULL)
-            {
-                /* converts the error to PSW format*/
-                err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
-                M4OSA_TRACE2_1("M4OSA_fileReadOpen_optim ERR1 = 0x%x", err);
-                apContext->IsOpened = M4OSA_FALSE;
-            }
-#endif
-
-            else
-            {
-                apContext->IsOpened = M4OSA_TRUE;
-            }
-        }
-        else
-        {
-            err = M4ERR_FILE_BAD_MODE_ACCESS;
-        }
-    }
-
-    if (M4NO_ERROR != err) goto cleanup;
-
-    /* Allocate buffers */
-    err = M4OSA_FileReader_BufferInit(apContext);
-    buffers_allocated = M4OSA_TRUE;
-
-    if (M4NO_ERROR != err) goto cleanup;
-
-    /* Initialize parameters */
-    apContext->fileSize = 0;
-    apContext->absolutePos = 0;
-    apContext->readFilePos = 0;
-
-    /* Retrieve the File Descriptor*/
-    apContext->aFileDesc = aFileDesc;
-
-    /* Retrieve the File mode Access */
-    apContext->FileAttribute.modeAccess = (M4OSA_FileModeAccess) FileModeAccess;
-
-    /*Retrieve the File reader context */
-    *pContext= (M4OSA_Context)apContext;
-
-    /* Compute file size */
-    err = M4OSA_FileReader_CalculateSize(apContext);
-
-    if (M4NO_ERROR != err) goto cleanup;
-
-    return M4NO_ERROR;
-
-cleanup:
-
-    /* free context */
-    if (M4OSA_NULL != apContext)
-    {
-        if(buffers_allocated == M4OSA_TRUE)
-        {
-            M4OSA_FileReader_BufferFree(apContext);
-        }
-
-        free( apContext);
-        *pContext = M4OSA_NULL;
-    }
-
-    M4OSA_TRACE2_1 ("M4OSA_fileReadOpen_optim: returns error 0x%0x", err)
-    return err;
-}
-
-/**
-******************************************************************************
-* @brief       This method reads the 'size' bytes in the core file reader (selected by its 'context')
-*                      and writes the data to the 'data' pointer. If 'size' byte can not be read in the core file reader,
-*                      'size' parameter is updated to match the correct number of read bytes.
-* @param       pContext:       (IN) File reader context.
-* @param       pData : (OUT) Data pointer of the read data.
-* @param       pSize : (INOUT) Size of the data to read (in byte).
-* @return      M4NO_ERROR: there is no error
-* @return      M4ERR_PARAMETER pSize, fileDescriptor or pData is NULL
-* @return      M4ERR_ALLOC     there is no more memory available
-* @return      M4ERR_BAD_CONTEXT       provided context is not a valid one.
-******************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadData_optim(M4OSA_Context pContext,M4OSA_MemAddr8 pData,
-                                                            M4OSA_UInt32* pSize)
-{
-    M4OSA_FileReader_Context_optim* apContext =
-                                     (M4OSA_FileReader_Context_optim*) pContext;
-
-    M4OSA_ERR err;
-    M4OSA_FilePosition aSize;
-    M4OSA_FilePosition copiedSize;
-    M4OSA_Int8 selected_buffer, current_buffer;
-
-    M4OSA_TRACE3_3("M4OSA_fileReadData_optim p = 0x%p  d = 0x%p  s = %lu",
-                                                       pContext, pData, *pSize);
-
-    /* Check input parameters */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pData);
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pSize);
-
-    if (apContext->IsOpened != M4OSA_TRUE)
-    {
-        return M4ERR_BAD_CONTEXT;
-    }
-
-    /* Prevent reading beyond EOF */
-    if((*pSize > 0) && (apContext->absolutePos >= apContext->fileSize))
-    {
-        copiedSize = 0;
-        err = M4WAR_NO_MORE_AU;
-        goto cleanup;
-    }
-
-    /* Check if data can be read from a buffer */
-    /* If not, fill one according to quantized positions */
-    copiedSize = 0;
-    err = M4NO_ERROR;
-
-    selected_buffer = M4OSA_FileReader_BufferMatch(apContext, apContext->absolutePos);
-
-    if(selected_buffer == M4OSA_READBUFFER_NONE)
-    {
-        selected_buffer = M4OSA_FileReader_BufferSelect(apContext, 0);
-        err = M4OSA_FileReader_BufferFill(apContext, selected_buffer,
-                                                        apContext->absolutePos);
-    }
-
-    if(err != M4NO_ERROR)
-    {
-        if(err == M4WAR_NO_DATA_YET)
-        {
-            if (*pSize <= (M4OSA_UInt32)apContext->buffer[selected_buffer].size)
-            {
-                err = M4NO_ERROR;
-            }
-            else
-            {
-                copiedSize = (M4OSA_UInt32)apContext->buffer[selected_buffer].size;
-                /*copy the content into pData*/
-                M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
-                                     apContext->absolutePos, copiedSize, pData);
-                goto cleanup;
-            }
-        }
-        else
-        {
-            goto cleanup;
-        }
-    }
-
-    M4OSA_TRACE3_3("read  size = %lu  buffer = %d  pos = %ld", *pSize,
-                                       selected_buffer, apContext->absolutePos);
-
-    /* Copy buffer into pData */
-    while(((M4OSA_UInt32)copiedSize < *pSize) && (err == M4NO_ERROR))
-    {
-        aSize = M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
-                                            apContext->absolutePos+copiedSize,
-                                            *pSize-copiedSize, pData+copiedSize);
-        copiedSize += aSize;
-
-        if(aSize == 0)
-        {
-            err = M4WAR_NO_DATA_YET;
-        }
-        else
-        {
-            if((M4OSA_UInt32)copiedSize < *pSize)
-            {
-                current_buffer = selected_buffer;
-                selected_buffer = M4OSA_FileReader_BufferMatch(apContext,
-                                             apContext->absolutePos+copiedSize);
-
-                if(selected_buffer == M4OSA_READBUFFER_NONE)
-                {
-                    selected_buffer = M4OSA_FileReader_BufferSelect(apContext,
-                                                                current_buffer);
-                    err = M4OSA_FileReader_BufferFill(apContext, selected_buffer,
-                                             apContext->absolutePos+copiedSize);
-
-                    if(err != M4NO_ERROR)
-                    {
-                        if(err == M4WAR_NO_DATA_YET)
-                        {
-                            /*If we got all the data that we wanted, we should return no error*/
-                            if ((*pSize-copiedSize) <= (M4OSA_UInt32)apContext->buffer[selected_buffer].size)
-                            {
-                                err = M4NO_ERROR;
-                            }
-                            /*If we did not get enough data, we will return NO_DATA_YET*/
-
-                            /*copy the data read*/
-                            aSize = M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
-                                                               apContext->absolutePos+copiedSize,
-                                                               *pSize-copiedSize, pData+copiedSize);
-                            copiedSize += aSize;
-
-                            /*we reached end of file, so stop trying to read*/
-                            goto cleanup;
-                        }
-                        if (err == M4WAR_NO_MORE_AU)
-                        {
-                            err = M4WAR_NO_DATA_YET;
-
-                            /*copy the data read*/
-                            aSize = M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
-                                                             apContext->absolutePos+copiedSize,
-                                                             *pSize-copiedSize, pData+copiedSize);
-                            copiedSize += aSize;
-
-                            /*we reached end of file, so stop trying to read*/
-                            goto cleanup;
-
-                        }
-                        else
-                        {
-                            goto cleanup;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-cleanup :
-
-    /* Update the new position of the pointer */
-    apContext->absolutePos = apContext->absolutePos + copiedSize;
-
-    if((err != M4NO_ERROR)&&(err!=M4WAR_NO_DATA_YET))
-    {
-        M4OSA_TRACE2_3("M4OSA_fileReadData_optim size = %ld  copied = %ld  err = 0x%x",
-                                                           *pSize, copiedSize, err);
-    }
-
-    /* Effective copied size must be returned */
-    *pSize = copiedSize;
-
-
-    /* Read is done */
-    return err;
-}
-
-/**
-******************************************************************************
-* @brief       This method seeks at the provided position in the core file reader (selected by its 'context').
-*              The position is related to the seekMode parameter it can be either :
-*              From the beginning (position MUST be positive) : end position = position
-*              From the end (position MUST be negative) : end position = file size + position
-*              From the current position (signed offset) : end position = current position + position.
-* @param       pContext:       (IN) File reader context.
-* @param       SeekMode :      (IN) Seek access mode.
-* @param       pPosition :     (IN) Position in the file.
-* @return      M4NO_ERROR: there is no error
-* @return      M4ERR_PARAMETER Seekmode or fileDescriptor is NULL
-* @return      M4ERR_ALLOC     there is no more memory available
-* @return      M4ERR_BAD_CONTEXT       provided context is not a valid one.
-* @return      M4ERR_FILE_INVALID_POSITION the position cannot be reached.
-******************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadSeek_optim( M4OSA_Context pContext, M4OSA_FileSeekAccessMode SeekMode,
-                                                              M4OSA_FilePosition* pPosition)
-{
-    M4OSA_FileReader_Context_optim* apContext = (M4OSA_FileReader_Context_optim*) pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_TRACE3_3("M4OSA_fileReadSeek_optim p = 0x%p mode = %d pos = %d", pContext,
-                                                             SeekMode, *pPosition);
-
-    /* Check input parameters */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pPosition);
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, SeekMode);
-
-    if (apContext->IsOpened != M4OSA_TRUE)
-    {
-        return M4ERR_BAD_CONTEXT;       /*< The context can not be correct */
-    }
-
-    /* Go to the desired position */
-    switch(SeekMode)
-    {
-        case M4OSA_kFileSeekBeginning :
-            if(*pPosition < 0) {
-                return M4ERR_PARAMETER; /**< Bad SeekAcess mode */
-            }
-            apContext->absolutePos = *pPosition;
-            *pPosition = apContext->absolutePos;
-            break;
-
-        case M4OSA_kFileSeekEnd :
-            if(*pPosition > 0) {
-                return M4ERR_PARAMETER; /**< Bad SeekAcess mode */
-            }
-            apContext->absolutePos = apContext->fileSize + *pPosition;
-            *pPosition = apContext->absolutePos;
-            break;
-
-        case M4OSA_kFileSeekCurrent :
-            if(((apContext->absolutePos + *pPosition) > apContext->fileSize) ||
-                ((apContext->absolutePos + *pPosition) < 0)){
-                return M4ERR_PARAMETER; /**< Bad SeekAcess mode */
-            }
-            apContext->absolutePos = apContext->absolutePos + *pPosition;
-            *pPosition = apContext->absolutePos;
-            break;
-
-        default :
-            err = M4ERR_PARAMETER; /**< Bad SeekAcess mode */
-            break;
-    }
-
-    /* Return without error */
-    return err;
-}
-
-/**
-******************************************************************************
-* @brief       This method asks the core file reader to close the file
-*              (associated to the context) and also frees the context.
-* @param       pContext:       (IN) File reader context.
-* @return      M4NO_ERROR: there is no error
-* @return      M4ERR_BAD_CONTEXT       provided context is not a valid one.
-******************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadClose_optim(M4OSA_Context pContext)
-{
-    M4OSA_FileReader_Context_optim* apContext = (M4OSA_FileReader_Context_optim*) pContext;
-
-    M4OSA_ERR err = M4NO_ERROR;
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    M4OSA_ERR errno = M4NO_ERROR;
-#else
-    M4OSA_UInt16 errno;
-#endif
-
-    M4OSA_TRACE2_1("M4OSA_fileReadClose_optim p = 0x%p", pContext );
-
-    /* Check input parameters */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
-
-    if (apContext->IsOpened != M4OSA_TRUE)
-    {
-        return M4ERR_BAD_CONTEXT;       /**< The context can not be correct */
-    }
-
-    /* buffer */
-    M4OSA_FileReader_BufferFree(apContext);
-
-    /* Close the file */
-#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
-    errno = apContext->FS->closeRead(apContext->aFileDesc);
-
-    if (M4NO_ERROR != errno)
-    {
-        /* converts the error to PSW format*/
-        err = errno;
-        M4OSA_TRACE2_1("M4OSA_fileReadClose_optim ERR1 = 0x%x", err);
-    }
-#else
-    aRet_Val = apContext->FS->pFctPtr_Close(apContext->aFileDesc, &errno);
-
-    if (aRet_Val != 0)
-    {
-        /* converts the error to PSW format*/
-        err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
-        M4OSA_TRACE2_1("M4OSA_fileReadClose_optim ERR1 = 0x%x", err);
-    }
-#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
-
-    apContext->IsOpened = M4OSA_FALSE;
-
-    //>>>> GLM20090212 : set the low level function statically
-    if (apContext->FS != M4OSA_NULL)
-    {
-        free( apContext->FS);
-    }
-    //<<<< GLM20090212 : set the low level function statically
-
-    /* Free the context */
-    free(apContext);
-
-    /* Return without error */
-    return err;
-}
-
-/**
-******************************************************************************
-* @brief       This is a dummy function required to maintain function pointer
-*              structure.
-* @note        This is a dummy function required to maintain function pointer
-*              structure.
-* @param       pContext:       (IN) Execution context.
-* @param       OptionId :      (IN) Id of the option to set.
-* @param       OptionValue :   (IN) Value of the option.
-* @return      M4NO_ERROR: there is no error
-******************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadSetOption_optim(M4OSA_Context pContext,
-                                        M4OSA_FileReadOptionID OptionID,
-                                        M4OSA_DataOption OptionValue)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    return err;
-}
-
-/**
-******************************************************************************
-* @brief       This method asks the core file reader to return the value associated
-*              with the optionID.The caller is responsible for allocating/de-allocating
-*              the memory of the value field.
-* @note        The options handled by the component depend on the implementation
-*                                                               of the component.
-* @param       pContext:       (IN) Execution context.
-* @param       OptionId :      (IN) Id of the option to set.
-* @param       pOptionValue :  (OUT) Value of the option.
-* @return      M4NO_ERROR: there is no error
-* @return      M4ERR_BAD_CONTEXT       pContext is NULL
-* @return      M4ERR_BAD_OPTION_ID the option id is not valid.
-* @return      M4ERR_NOT_IMPLEMENTED The option is not implemented yet.
-******************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadGetOption_optim(M4OSA_Context pContext,
-                                        M4OSA_FileReadOptionID OptionID,
-                                        M4OSA_DataOption* pOptionValue)
-{
-    M4OSA_FileReader_Context_optim* apContext = (M4OSA_FileReader_Context_optim*) pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /*  Check input parameters */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
-
-    if (apContext->IsOpened != M4OSA_TRUE)
-    {
-        return M4ERR_BAD_CONTEXT;       /**< The context can not be correct */
-    }
-
-    /* Get the desired option if it is avalaible */
-    switch(OptionID)
-    {
-        /* Get File Size */
-        case M4OSA_kFileReadGetFileSize:/**< Get size of the file, limited to 32 bit size */
-
-            (*(M4OSA_UInt32 *)pOptionValue) = apContext->fileSize;
-            break;
-
-        /* Check End of file Occurs */
-        case M4OSA_kFileReadIsEOF :     /**< See if we are at the end of the file */
-
-            (*(M4OSA_Bool *)pOptionValue) = (apContext->absolutePos >= apContext->fileSize) ? M4OSA_TRUE : M4OSA_FALSE;
-            break;
-
-        /* Get File Position */
-        case M4OSA_kFileReadGetFilePosition :   /**< Get file position */
-
-            *(M4OSA_FilePosition *)pOptionValue = apContext->absolutePos;
-            break;
-
-        /* Get Attribute */
-        case M4OSA_kFileReadGetFileAttribute :  /**< Get the file attribute = access mode */
-
-            (*(M4OSA_FileAttribute *)pOptionValue).modeAccess = apContext->FileAttribute.modeAccess;
-            break;
-
-        default:
-            /**< Bad option ID */
-            err = M4ERR_BAD_OPTION_ID;
-            break;
-    }
-
-    /*Return without error */
-    return err;
-}
diff --git a/libvideoeditor/osal/src/M4OSA_CharStar.c b/libvideoeditor/osal/src/M4OSA_CharStar.c
deleted file mode 100755
index 0814cbf..0000000
--- a/libvideoeditor/osal/src/M4OSA_CharStar.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4DPAK_CharStar.c
- * @ingroup
-  * @brief        definition of the Char Star set of functions.
- * @note         This file defines the Char Star set of functions.
- *
- ************************************************************************
-*/
-
-
-#include "M4OSA_CharStar.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-
-/* WARNING: Specific Android */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-
-
-/**
- ************************************************************************
- * @brief      This function mimics the functionality of the libc's strncpy().
- * @note       It copies exactly len2Copy characters from pStrIn to pStrOut,
- *             truncating  pStrIn or adding null characters to pStrOut if
- *             necessary.
- *             - If len2Copy is less than or equal to the length of pStrIn,
- *               a null character is appended automatically to the copied
- *               string.
- *             - If len2Copy is greater than the length of pStrIn, pStrOut is
- *               padded with null characters up to length len2Copy.
- *             - pStrOut and pStrIn MUST NOT OVERLAP (this is NOT CHECKED).
- * @param      pStrOut: (OUT) Destination character string.
- * @param      pStrIn: (IN) Source character string.
- * @param      len2Copy: (IN) Maximum number of characters from pStrIn to copy.
- * @return     M4NO_ERROR: there is no error.
- * @return     M4ERR_PARAMETER: pStrIn or pStrOut is M4OSA_NULL.
-  ************************************************************************
-*/
-M4OSA_ERR M4OSA_chrNCopy(M4OSA_Char* pStrOut, M4OSA_Char   *pStrIn, M4OSA_UInt32 len2Copy)
-{
-    M4OSA_TRACE1_3("M4OSA_chrNCopy\t(M4OSA_Char* %x,M4OSA_Char* %x,M4OSA_UInt32 %ld)",
-        pStrOut,pStrIn,len2Copy);
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pStrOut),M4ERR_PARAMETER,
-                            "M4OSA_chrNCopy:\tpStrOut is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pStrIn),M4ERR_PARAMETER,
-                            "M4OSA_chrNCopy:\tpStrIn is M4OSA_NULL");
-
-    strncpy((char *)pStrOut, (const char *)pStrIn, (size_t)len2Copy);
-    if(len2Copy <= (M4OSA_UInt32)strlen((const char *)pStrIn))
-    {
-        pStrOut[len2Copy] = '\0';
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
-  * @brief      This function returns the boolean comparison of pStrIn1 and pStrIn2.
- * @note       The value returned in result is M4OSA_TRUE if the string
- *             pointed to by pStrIn1 is strictly identical to the string pointed
- *             to by pStrIn2, and M4OSA_FALSE otherwise.
- * @param      pStrIn1: (IN) First character string.
- * @param      pStrIn2: (IN) Second character string.
- * @param      cmpResult: (OUT) Comparison result.
- * @return     M4NO_ERROR: there is no error.
- * @return     M4ERR_PARAMETER: pStrIn1 pStrIn2 or cmpResult is M4OSA_NULL.
-  ************************************************************************
-*/
-M4OSA_ERR M4OSA_chrAreIdentical(M4OSA_Char* pStrIn1, M4OSA_Char* pStrIn2,
-                                                            M4OSA_Bool* pResult)
-{
-    M4OSA_UInt32 i32,len32;
-    M4OSA_TRACE1_3("M4OSA_chrAreIdentical\t(M4OSA_Char* %x,M4OSA_Char* %x,"
-        "M4OSA_Int32* %x)",pStrIn1,pStrIn2,pResult);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn1, M4ERR_PARAMETER,
-                               "M4OSA_chrAreIdentical:\tpStrIn1 is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn2, M4ERR_PARAMETER,
-                               "M4OSA_chrAreIdentical:\tpStrIn2 is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pResult, M4ERR_PARAMETER,
-                               "M4OSA_chrAreIdentical:\tpResult is M4OSA_NULL");
-
-    len32 = (M4OSA_UInt32)strlen((const char *)pStrIn1);
-    if(len32 != (M4OSA_UInt32)strlen((const char *)pStrIn2))
-    {
-        *pResult = M4OSA_FALSE;
-        return M4NO_ERROR;
-    }
-
-    for(i32=0;i32<len32;i32++)
-    {
-        if(pStrIn1[i32] != pStrIn2[i32])
-        {
-            *pResult = M4OSA_FALSE;
-            return M4NO_ERROR;
-        }
-    }
-
-    *pResult = M4OSA_TRUE;
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function gets a M4OSA_UInt32 from string.
- * @note       This function converts the first set of non-whitespace
- *             characters of pStrIn to a M4OSA_UInt32 value pVal, assuming a
- *             representation in base provided by the parameter base. pStrOut is
- *             set to the first character of the string following the last
- *             character of the number that has been converted.
- *             - in case of a failure during the conversion, pStrOut is not
- *               updated, and pVal is set to null.
- *             - in case of negative number, pStrOut is not updated, and pVal is
- *               set to null.
- *             - in case of numerical overflow, pVal is set to M4OSA_UINT32_MAX.
- *             - if pStrOut is not to be used, it can be set to M4OSA_NULL.
- * @param      pStrIn: (IN) Character string.
- * @param      pVal: (OUT) read value.
- * @param      pStrOut: (OUT) Output character string.
- * @param      base: (IN) Base of the character string representation.
- * @return     M4NO_ERROR: there is no error.
- * @return     M4ERR_PARAMETER: pStrIn or pVal is M4OSA_NULL.
- * @return     M4ERR_CHR_CONV_FAILED: conversion failure.
- * @return     M4WAR_CHR_NUM_RANGE: the character string represents a number
- *             greater than M4OSA_UINT32_MAX.
- * @return     M4WAR_CHR_NEGATIVE: the character string represents a negative
- *             number.
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_chrGetUInt32(M4OSA_Char*    pStrIn,
-                             M4OSA_UInt32*    pVal,
-                             M4OSA_Char**    pStrOut,
-                             M4OSA_chrNumBase base)
-{
-    M4OSA_UInt32 ul;
-    char*        pTemp;
-
-    M4OSA_TRACE1_4("M4OSA_chrGetUInt32\t(M4OSA_Char* %x, M4OSA_UInt32* %x"
-        "M4OSA_Char** %x,M4OSA_chrNumBase %d)",pStrIn,pVal,pStrOut,base);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn, M4ERR_PARAMETER,
-                                   "M4OSA_chrGetUInt32:\tpStrIn is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pVal, M4ERR_PARAMETER,
-                                     "M4OSA_chrGetUInt32:\tpVal is M4OSA_NULL");
-
-    errno = 0;
-    switch(base)
-    {
-    case M4OSA_kchrDec:
-        ul = strtoul((const char *)pStrIn, &pTemp, 10);
-        break;
-    case M4OSA_kchrHexa:
-        ul = strtoul((const char *)pStrIn, &pTemp,16);
-        break;
-    case M4OSA_kchrOct:
-        ul = strtoul((const char *)pStrIn, &pTemp,8);
-        break;
-    default:
-        return M4ERR_PARAMETER;
-    }
-
-    /* has conversion failed ? */
-    if((M4OSA_Char*)pTemp == pStrIn)
-    {
-        *pVal = 0;
-        return M4ERR_CHR_CONV_FAILED;
-    }
-
-    /* was the number negative ? */
-    if(*(pStrIn+strspn((const char *)pStrIn," \t")) == '-')
-    {
-        *pVal = 0;
-        return M4WAR_CHR_NEGATIVE;
-    }
-
-    /* has an overflow occured ? */
-    if(errno == ERANGE)
-    {
-        *pVal = M4OSA_UINT32_MAX;
-        if(M4OSA_NULL != pStrOut)
-        {
-            *pStrOut = (M4OSA_Char*)pTemp;
-        }
-        return M4WAR_CHR_NUM_RANGE;
-    }
-
-    /* nominal case */
-    *pVal = (M4OSA_UInt32)ul;
-    if(M4OSA_NULL != pStrOut)
-    {
-        *pStrOut = (M4OSA_Char*)pTemp;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief      This function gets a M4OSA_UInt16 from string.
- * @note       This function converts the first set of non-whitespace
- *             characters of pStrIn to a M4OSA_UInt16 value pVal, assuming a
- *             representation in base provided by the parameter base. pStrOut is
- *             set to the first character of the string following the last
- *             character of the number that has been converted.
- *             - in case of a failure during the conversion, pStrOut is not
- *               updated, and pVal is set to null.
- *             - in case of negative number, pStrOut is not updated, and pVal is
- *               set to null.
- *             - in case of numerical overflow, pVal is set to M4OSA_UINT16_MAX.
- *             - if pStrOut is not to be used, it can be set to M4OSA_NULL.
- * @param      pStrIn: (IN) Character string.
- * @param      pVal: (OUT) read value.
- * @param      pStrOut: (OUT) Output character string.
- * @param      base: (IN) Base of the character string representation.
- * @return     M4NO_ERROR: there is no error.
- * @return     M4ERR_PARAMETER: pStrIn or pVal is M4OSA_NULL.
- * @return     M4ERR_CHR_CONV_FAILED: conversion failure.
- * @return     M4WAR_CHR_NUM_RANGE: the character string represents a number
- *             greater than M4OSA_UINT16_MAX.
- * @return     M4WAR_CHR_NEGATIVE: the character string represents a negative
- *             number.
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_chrGetUInt16 (M4OSA_Char* pStrIn, M4OSA_UInt16 *pVal,
-                              M4OSA_Char** pStrOut, M4OSA_chrNumBase base)
-{
-    M4OSA_UInt32 ul;
-    char*        pTemp;
-
-    M4OSA_TRACE1_4("M4OSA_chrGetUInt16\t(M4OSA_Char* %x, M4OSA_UInt16* %x"
-        "M4OSA_Char** %x,M4OSA_chrNumBase %d)",pStrIn,pVal,pStrOut,base);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn,M4ERR_PARAMETER,
-                                   "M4OSA_chrGetUInt16:\tpStrIn is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pVal, M4ERR_PARAMETER,
-                                     "M4OSA_chrGetUInt16:\tpVal is M4OSA_NULL");
-
-    switch(base)
-    {
-    case M4OSA_kchrDec:
-        ul = strtoul((const char *)pStrIn, &pTemp,10);
-        break;
-    case M4OSA_kchrHexa:
-        ul = strtoul((const char *)pStrIn, &pTemp,16);
-        break;
-    case M4OSA_kchrOct:
-        ul = strtoul((const char *)pStrIn, &pTemp,8);
-        break;
-    default:
-        return M4ERR_PARAMETER;
-    }
-
-    /* has conversion failed ? */
-    if((M4OSA_Char*)pTemp == pStrIn)
-    {
-        *pVal = 0;
-        return M4ERR_CHR_CONV_FAILED;
-    }
-
-    /* was the number negative ? */
-    if(*(pStrIn+strspn((const char *)pStrIn," \t")) == '-')
-    {
-        *pVal = 0;
-        return M4WAR_CHR_NEGATIVE;
-    }
-
-    /* has an overflow occured ? */
-    if(ul>M4OSA_UINT16_MAX)
-    {
-        *pVal = M4OSA_UINT16_MAX;
-        if(M4OSA_NULL != pStrOut)
-        {
-            *pStrOut = (M4OSA_Char*)pTemp;
-        }
-        return M4WAR_CHR_NUM_RANGE;
-    }
-
-    /* nominal case */
-    *pVal = (M4OSA_UInt16)ul;
-    if(M4OSA_NULL != pStrOut)
-    {
-        *pStrOut = (M4OSA_Char*)pTemp;
-    }
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR M4OSA_chrSPrintf(M4OSA_Char  *pStrOut, M4OSA_UInt32 strOutMaxLen,
-                           M4OSA_Char   *format, ...)
-{
-    va_list       marker;
-    M4OSA_Char   *pTemp;
-    M4OSA_Char   *percentPointer;
-    M4OSA_Char   *newFormat;
-    M4OSA_Int32  newFormatLength=0;
-    M4OSA_UInt32  count_ll = 0;
-    M4OSA_UInt32  count_tm = 0;
-    M4OSA_UInt32  count_aa = 0;
-    M4OSA_UInt32  count;
-    M4OSA_UInt32  nbChar;
-    M4OSA_Int32     err;
-    M4OSA_Char flagChar[]             = "'-+ #0";
-    M4OSA_Char widthOrPrecisionChar[] = "*0123456789";
-    M4OSA_Char otherPrefixChar[]      = "hlL";
-    M4OSA_Char conversionChar[]       = "diouxXnfeEgGcCsSp%";
-
-    M4OSA_TRACE1_3("M4OSA_chrSPrintf\t(M4OSA_Char* %x, M4OSA_UInt32 %ld"
-        "M4OSA_Char* %x)",pStrOut,strOutMaxLen,format);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrOut, M4ERR_PARAMETER,
-                                    "M4OSA_chrSPrintf:\tpStrOut is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == format, M4ERR_PARAMETER,
-                                     "M4OSA_chrSPrintf:\tformat is M4OSA_NULL");
-
-    va_start(marker,format);
-
-    /* count the number of %[flags][width][.precision]ll[conversion] */
-    pTemp = format;
-    while(*pTemp)
-    {
-        percentPointer = (M4OSA_Char *)strchr((const char *)pTemp,'%'); /* get the next percent character */
-        if(!percentPointer)
-            break;                            /* "This is the End", (c) J. Morrisson */
-        pTemp = percentPointer+1;           /* span it */
-        if(!*pTemp)
-            break;                            /* "This is the End", (c) J. Morrisson */
-        pTemp += strspn((const char *)pTemp,(const char *)flagChar);    /* span the optional flags */
-        if(!*pTemp)
-            break;                            /* "This is the End", (c) J. Morrisson */
-        pTemp += strspn((const char *)pTemp,(const char *)widthOrPrecisionChar); /* span the optional width */
-        if(!*pTemp)
-            break;                            /* "This is the End", (c) J. Morrisson */
-        if(*pTemp=='.')
-        {
-            pTemp++;
-            pTemp += strspn((const char *)pTemp, (const char *)widthOrPrecisionChar); /* span the optional precision */
-        }
-        if(!*pTemp)
-            break;                            /* "This is the End", (c) J. Morrisson */
-        if(strlen((const char *)pTemp)>=2)
-        {
-            if(!strncmp((const char *)pTemp,"ll",2))
-            {
-                count_ll++;                 /* I got ONE */
-                pTemp +=2;                  /* span the "ll" prefix */
-            }
-            else if(!strncmp((const char *)pTemp,"tm",2))
-            {
-                count_tm++;
-                pTemp +=2;
-            }
-            else if(!strncmp((const char *)pTemp,"aa",2))
-            {
-                count_aa++;
-                pTemp +=2;
-            }
-        }
-        pTemp += strspn((const char *)pTemp, (const char *)otherPrefixChar); /* span the other optional prefix */
-        if(!*pTemp)
-            break;                        /* "This is the End", (c) J. Morrisson */
-        pTemp += strspn((const char *)pTemp, (const char *)conversionChar);
-        if(!*pTemp)
-            break;                        /* "This is the End", (c) J. Morrisson */
-    }
-
-    count = count_ll + count_tm + count_aa;
-
-    if(!count)
-    {
-        err= vsnprintf((char *)pStrOut, (size_t)strOutMaxLen + 1, (const char *)format, marker);
-        va_end(marker);
-        if ((err<0) || ((M4OSA_UInt32)err>strOutMaxLen))
-        {
-            pStrOut[strOutMaxLen] = '\0';
-            return M4ERR_CHR_STR_OVERFLOW;
-        }
-        else
-        {
-            return M4NO_ERROR;
-        }
-    }
-
-
-    newFormatLength = strlen((const char *)format) + 1;
-
-    newFormatLength -= (count_ll+count_tm+count_aa);
-
-    newFormat =(M4OSA_Char*)M4OSA_32bitAlignedMalloc(newFormatLength,
-        M4OSA_CHARSTAR,(M4OSA_Char*)"M4OSA_chrPrintf: newFormat");
-    if(M4OSA_NULL == newFormat)
-        return M4ERR_ALLOC;
-    newFormat[newFormatLength-1] = '\0';
-    pTemp = newFormat;
-
-    /* copy format to newFormat, replacing %[flags][width][.precision]ll[conversion]
-     * by %[flags][width][.precision]I64[conversion] */
-    while(*format)
-    {
-        nbChar = strcspn((const char *)format, "%");
-        if(nbChar)
-        {
-            strncpy((char *)pTemp, (const char *)format, nbChar);      /* copy characters before the % character */
-            format +=nbChar;
-            pTemp   +=nbChar;
-        }
-        if(!*format) break;
-        *pTemp++ = *format++;                 /* copy the % character */
-        nbChar = strspn((const char *)format, (const char *)flagChar);
-        if(nbChar)
-        {
-            strncpy((char *)pTemp, (const char *)format, nbChar);      /* copy the flag characters */
-            format +=nbChar;
-            pTemp   +=nbChar;
-        }
-        if(!*format) break;
-        nbChar = strspn((const char *)format, (const char *)widthOrPrecisionChar);
-        if(nbChar)
-        {
-            strncpy((char *)pTemp, (const char *)format, nbChar);      /* copy the width characters */
-            format +=nbChar;
-            pTemp   +=nbChar;
-        }
-        if(!*format) break;
-        if(*format=='.')
-        {
-            *pTemp++ = *format++;              /* copy the dot character */
-            if(!format) break;
-            nbChar = strspn((const char *)format, (const char *)widthOrPrecisionChar);
-            if(nbChar)
-            {
-                strncpy((char *)pTemp, (const char *)format, nbChar);      /* copy the width characters */
-                format +=nbChar;
-                pTemp   +=nbChar;
-            }
-            if(!format) break;
-        }
-        if(strlen((const char *)format)>=2)
-        {
-            if(!strncmp((const char *)format, "ll", 2))
-            {
-                *pTemp++ = 'l'; /* %l */
-                format +=2;                         /* span the "ll" prefix */
-            }
-            else if(!strncmp((const char *)format, "tm", 2))
-            {
-                *pTemp++ = 'l'; /* %l */
-                format +=2;                         /* span the "tm" prefix */
-            }
-            else if(!strncmp((const char *)format, "aa", 2))
-            {
-                *pTemp++ = 'l';
-                format +=2;                         /* span the "aa" prefix */
-            }
-        }
-        nbChar = strspn((const char *)format, (const char *)otherPrefixChar);
-        if(nbChar)
-        {
-            strncpy((char *)pTemp, (const char *)format, nbChar);      /* copy the other Prefix */
-            format +=nbChar;
-            pTemp   +=nbChar;
-        }
-        if(!*format) break;
-        nbChar = strspn((const char *)format, (const char *)conversionChar);
-        if(nbChar)
-        {
-            strncpy((char *)pTemp, (const char *)format, nbChar);
-            format += nbChar;
-            pTemp   += nbChar;
-        }
-        if(!*format) break;
-    }
-
-    /* Zero terminate the format string. */
-    (*pTemp) = '\0';
-
-    err = vsnprintf((char *)pStrOut, (size_t)strOutMaxLen + 1, (const char *)newFormat, marker);
-    va_end(marker);
-    free(newFormat);
-    if ((err<0) || ((M4OSA_UInt32)err>strOutMaxLen))
-    {
-        pStrOut[strOutMaxLen] = '\0';
-        return M4ERR_CHR_STR_OVERFLOW;
-    }
-    else
-    {
-        return M4NO_ERROR;
-    }
-}
-
diff --git a/libvideoeditor/osal/src/M4OSA_Clock.c b/libvideoeditor/osal/src/M4OSA_Clock.c
deleted file mode 100755
index 9817b22..0000000
--- a/libvideoeditor/osal/src/M4OSA_Clock.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/**
- ************************************************************************
- * @file         M4OSA_Clock.c
- * @brief        Clock related functions
- * @note         This file implements functions to manipulate clock
- ************************************************************************
-*/
-
-#include <sys/time.h>
-#include <time.h>
-
-#include "M4OSA_Debug.h"
-#include "M4OSA_Clock.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Types.h"
-
-
-
-
-/**
- ************************************************************************
- * @brief      This function gets an absolute time to an unknown reference with
- *             a high precision.
- * @note       It means it can only be used to get a relative time by computing
- *             differences between to times.
- *             It is to the caller to allocate time. Time is expressed in
- *             timescale unit.
- *             M4OSA_ROLLOVER_CLOCK in M4OSA_Types.h must be configured with the rollover
- *             offset of this function.
- * @param      time: (IN/OUT) time
- * @param      timescale: (IN) The timescale (time unit per second)
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4WAR_TIMESCALE_TOO_BIG: the precision of the system clock is
- *             not
- *             compliant with the input timescale
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_clockGetTime(M4OSA_Time* pTime, M4OSA_UInt32 timescale)
-{
-    struct timeval tv;
-    struct timezone tz;
-    M4OSA_UInt32 u32_time = 0;
-    M4OSA_UInt32 u32_time_hi;
-    M4OSA_UInt32 u32_time_lo;
-    M4OSA_UInt32 u32_time_lh;
-    M4OSA_UInt32 factor;
-
-    M4OSA_TRACE1_2("M4OSA_clockGetTime\t\tM4OSA_Time* 0x%x\tM4OSA_UInt32 %d",
-                                                              pTime, timescale);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pTime, M4ERR_PARAMETER,
-                                     "M4OSA_clockGetTime: pTime is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(0 == timescale, M4ERR_PARAMETER,
-                                          "M4OSA_clockGetTime: timescale is 0");
-
-    factor = 1000000 / timescale;
-
-    if(gettimeofday(&tv, &tz) == 0)
-    {
-        u32_time_lo = (tv.tv_sec & 0xFFFF) * timescale;
-        u32_time_hi = (((tv.tv_sec >> 16) & 0xFFFF) * timescale) + ((u32_time_lo >> 16) & 0xFFFF);
-        u32_time_lo &= 0xFFFF;
-        u32_time_lo += tv.tv_usec / factor;
-        u32_time_hi += ((u32_time_lo >> 16) & 0xFFFF);
-        u32_time_lo &= 0xFFFF;
-        u32_time = ((u32_time_hi & 0x7FFF) << 16) | u32_time_lo;
-    }
-
-    /* M4OSA_Time is signed, so we need to check the max value*/
-    if (u32_time > M4OSA_INT32_MAX)
-    {
-        u32_time = u32_time - M4OSA_INT32_MAX;
-    }
-
-    *pTime = (M4OSA_Time)u32_time;
-
-    if( timescale > 10000 )
-    {
-        return M4WAR_TIMESCALE_TOO_BIG;
-    }
-
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/osal/src/M4OSA_FileCommon.c b/libvideoeditor/osal/src/M4OSA_FileCommon.c
deleted file mode 100755
index c12db5d..0000000
--- a/libvideoeditor/osal/src/M4OSA_FileCommon.c
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileCommon.c
- * @brief        File common for Android
- * @note         This file implements functions used by both the file writer
- *               and file reader.
- ************************************************************************
-*/
-
-#ifndef USE_STAGEFRIGHT_CODECS
-#error "USE_STAGEFRIGHT_CODECS is not defined"
-#endif /*USE_STAGEFRIGHT_CODECS*/
-
-#ifdef UTF_CONVERSION
-#include <string.h>
-#endif /*UTF_CONVERSION*/
-
-#include <sys/stat.h>
-#include <errno.h>
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-#include "M4OSA_Semaphore.h"
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-
-#include "M4OSA_Debug.h"
-#include "M4OSA_FileCommon.h"
-#include "M4OSA_FileCommon_priv.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_CharStar.h"
-
-/**
- ************************************************************************
- * @brief      This function opens the provided URL and returns its context.
- *             If an error occured, the context is set to NULL.
- * @param      core_id: (IN) Core ID of the caller (M4OSA_FILE_READER or M4OSA_FILE_WRITER)
- * @param      context: (OUT) Context of the core file reader
- * @param      url: (IN) URL of the input file
- * @param      fileModeAccess: (IN) File mode access
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_ALLOC: there is no more memory available
- * @return     M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
- *             file
- * @return     M4ERR_FILE_NOT_FOUND: the file cannot be found
- * @return     M4ERR_FILE_LOCKED: the file is locked by an other
- *             application/process
- * @return     M4ERR_FILE_BAD_MODE_ACCESS: the file mode access is not correct
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileCommonOpen(M4OSA_UInt16 core_id, M4OSA_Context* pContext,
-                               M4OSA_Char* pUrl, M4OSA_FileModeAccess fileModeAccess)
-{
-
-    M4OSA_Int32 i            = 0;
-    M4OSA_Int32 iMode        = 0;
-    M4OSA_Int32 iSize        = 0;
-    M4OSA_Int32 iSavePos    = 0;
-
-    M4OSA_Char  mode[4]            = "";
-    M4OSA_Char* pReadString        = (M4OSA_Char*)"r";
-    M4OSA_Char* pWriteString    = (M4OSA_Char*)"w";
-    M4OSA_Char* pAppendString    = (M4OSA_Char*)"a";
-    M4OSA_Char* pBinaryString    = (M4OSA_Char*)"b";
-    M4OSA_Char* pPlusString        = (M4OSA_Char*)"+";
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    FILE* pFileHandler = M4OSA_NULL;
-    M4OSA_FileContext *pFileContext    = M4OSA_NULL;
-
-
-#ifdef UTF_CONVERSION
-    /*FB: to test the UTF16->UTF8 conversion into Video Artist*/
-    /*Convert the URL from UTF16 to UTF8*/
-    M4OSA_Void* tempConversionBuf;
-    M4OSA_UInt32 tempConversionSize = 1000;
-
-    tempConversionBuf = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(tempConversionSize +1, 0, "conversion buf");
-    if(tempConversionBuf == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Error when allocating conversion buffer\n");
-        return M4ERR_PARAMETER;
-    }
-    M4OSA_ToUTF8_OSAL(pUrl, tempConversionBuf, &tempConversionSize);
-    ((M4OSA_Char*)tempConversionBuf)[tempConversionSize ] = '\0';
-
-    printf("file open %s\n", tempConversionBuf);
-#endif /*UTF CONVERSION*/
-
-    M4OSA_TRACE3_4("M4OSA_fileCommonOpen\t\tM4OSA_UInt16 %d\tM4OSA_Context* 0x%x\t"
-        "M4OSA_Char* %s\tfileModeAccess %d", core_id, pContext, pUrl, fileModeAccess);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext,    M4ERR_PARAMETER,    "M4OSA_fileCommonOpen: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pUrl,        M4ERR_PARAMETER,    "M4OSA_fileCommonOpen: pUrl  is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(0 == fileModeAccess,    M4ERR_PARAMETER,    "M4OSA_fileCommonOpen: fileModeAccess is 0");
-
-    /* Read mode not set for the reader */
-    M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && !(fileModeAccess & M4OSA_kFileRead),
-        M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileRead");
-
-    /* Read mode not set for the reader */
-    M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && !(fileModeAccess & M4OSA_kFileRead),
-        M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileRead");
-
-    /* M4OSAfileReadOpen cannot be used with Write file mode access */
-    M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && (fileModeAccess & M4OSA_kFileWrite),
-        M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileWrite");
-
-    /* Append and Create flags cannot be used with Read */
-    M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && (fileModeAccess & M4OSA_kFileAppend),
-        M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileAppend");
-
-    M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && (fileModeAccess & M4OSA_kFileCreate),
-        M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileCreate");
-
-    /* Write mode not set for the writer */
-    M4OSA_DEBUG_IF1((M4OSA_FILE_WRITER == core_id) && !(fileModeAccess & M4OSA_kFileWrite),
-        M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileWrite");
-
-    /* Create flag necessary for opening file */
-    if ((fileModeAccess & M4OSA_kFileRead) &&
-        (fileModeAccess & M4OSA_kFileWrite)&&(fileModeAccess & M4OSA_kFileCreate))
-    {
-        strncat((char *)mode, (const char *)pWriteString, (size_t)1);
-        strncat((char *)mode, (const char *)pPlusString, (size_t)1);
-    }
-    else
-    {
-        if(fileModeAccess & M4OSA_kFileAppend)
-        {
-            strncat((char *)mode, (const char *)pAppendString, (size_t)1);
-        }
-        else if(fileModeAccess & M4OSA_kFileRead)
-        {
-            strncat((char *)mode, (const char *)pReadString, (size_t)1);
-        }
-        else if(fileModeAccess & M4OSA_kFileWrite)
-        {
-            strncat((char *)mode, (const char *)pWriteString, (size_t)1);
-        }
-
-        if((fileModeAccess & M4OSA_kFileRead)&&(fileModeAccess & M4OSA_kFileWrite))
-        {
-            strncat((char *)mode,(const char *)pPlusString, (size_t)1);
-        }
-    }
-
-    if(!(fileModeAccess & M4OSA_kFileIsTextMode))
-    {
-        strncat((char *)mode, (const char *)pBinaryString,(size_t)1);
-    }
-
-    /*Open the file*/
-
-#ifdef UTF_CONVERSION
-    /*Open the converted path*/
-    pFileHandler = fopen((const char *)tempConversionBuf, (const char *)mode);
-    /*Free the temporary decoded buffer*/
-    free(tempConversionBuf);
-#else /* UTF_CONVERSION */
-    pFileHandler = fopen((const char *)pUrl, (const char *)mode);
-#endif /* UTF_CONVERSION */
-
-    if (M4OSA_NULL == pFileHandler)
-    {
-        switch(errno)
-        {
-        case ENOENT:
-            {
-                M4OSA_DEBUG(M4ERR_FILE_NOT_FOUND, "M4OSA_fileCommonOpen: No such file or directory");
-                M4OSA_TRACE1_1("File not found: %s", pUrl);
-                return M4ERR_FILE_NOT_FOUND;
-            }
-        case EACCES:
-            {
-                M4OSA_DEBUG(M4ERR_FILE_LOCKED, "M4OSA_fileCommonOpen: Permission denied");
-                return M4ERR_FILE_LOCKED;
-            }
-         case EINVAL:
-         {
-            M4OSA_DEBUG(M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: Invalid Argument");
-            return M4ERR_FILE_BAD_MODE_ACCESS;
-         }
-        case EMFILE:
-         case ENOSPC:
-        case ENOMEM:
-            {
-                M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonOpen: Too many open files");
-                return M4ERR_ALLOC;
-            }
-        default:
-            {
-                M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileCommonOpen");
-                return M4ERR_NOT_IMPLEMENTED;
-            }
-        }
-    }
-
-    /* Allocate the file context */
-    pFileContext = (M4OSA_FileContext*) M4OSA_32bitAlignedMalloc(sizeof(M4OSA_FileContext),
-                    core_id, (M4OSA_Char*)"M4OSA_fileCommonOpen: file context");
-    if (M4OSA_NULL == pFileContext)
-    {
-        fclose(pFileHandler);
-        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonOpen");
-        return M4ERR_ALLOC;
-    }
-
-    pFileContext->file_desc        = pFileHandler;
-    pFileContext->access_mode    = fileModeAccess;
-    pFileContext->current_seek    = SeekNone;
-    pFileContext->b_is_end_of_file    = M4OSA_FALSE;
-
-    /**
-     * Note: Never use this expression "i = (value1 == value2) ? x: y;"
-     * because that doens't compile on other platforms (ADS for example)
-     * Use: if(value1 == value2)
-     *        { i= x; ..etc
-     */
-    pFileContext->coreID_write = 0;
-    pFileContext->coreID_read = 0;
-    pFileContext->m_DescrModeAccess = M4OSA_kDescNoneAccess;
-
-    if (M4OSA_FILE_READER == core_id)
-    {
-        pFileContext->coreID_read = core_id;
-        pFileContext->m_DescrModeAccess = M4OSA_kDescReadAccess;
-    }
-    else if (M4OSA_FILE_WRITER == core_id)
-    {
-        pFileContext->coreID_write = core_id;
-        pFileContext->m_DescrModeAccess = M4OSA_kDescWriteAccess;
-    }
-
-    pFileContext->read_position = 0;
-    pFileContext->write_position = 0;
-
-    /* Allocate the memory to store the URL string */
-    pFileContext->url_name = (M4OSA_Char*) M4OSA_32bitAlignedMalloc(strlen((const char *)pUrl)+1,
-                        core_id, (M4OSA_Char*)"M4OSA_fileCommonOpen: URL name");
-    if (M4OSA_NULL == pFileContext->url_name)
-    {
-        fclose(pFileHandler);
-        free(pFileContext);
-        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonOpen");
-        return M4ERR_ALLOC;
-    }
-    M4OSA_chrNCopy(pFileContext->url_name, pUrl, strlen((const char *)pUrl)+1);
-
-    /* Get the file name */
-    err = M4OSA_fileCommonGetFilename(pUrl, &pFileContext->file_name);
-    if(M4NO_ERROR != err)
-    {
-        fclose(pFileHandler);
-        free(pFileContext->url_name);
-        free(pFileContext);
-        M4OSA_DEBUG(err, "M4OSA_fileCommonOpen");
-        return err;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreOpen(&(pFileContext->semaphore_context), 1); /* Allocate the semaphore */
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-
-
-#ifdef USE_STAGEFRIGHT_CODECS
-    // Workaround for file system bug on Stingray/Honeycomb where a file re-created will keep
-    // the original file's size filled with 0s. Do not seek to the end to avoid ill effects
-    if(fileModeAccess & M4OSA_kFileAppend) {
-        /* Get the file size */
-        iSavePos = ftell(pFileHandler);            /*    1- Check the first position */
-        fseek(pFileHandler, 0, SEEK_END);        /*    2- Go to the end of the file*/
-        iSize = ftell(pFileHandler);            /*    3- Check the file size        */
-        fseek(pFileHandler, iSavePos, SEEK_SET);/*    4- go to the first position */
-    } else {
-        iSize = 0;
-    }
-#else /* USE_STAGEFRIGHT_CODECS */
-    /* Get the file size */
-    iSavePos = ftell(pFileHandler);            /*    1- Check the first position */
-    fseek(pFileHandler, 0, SEEK_END);        /*    2- Go to the end of the file*/
-    iSize = ftell(pFileHandler);            /*    3- Check the file size        */
-    fseek(pFileHandler, iSavePos, SEEK_SET);/*    4- go to the first position */
-#endif /* USE_STAGEFRIGHT_CODECS */
-
-
-
-    /* Warning possible overflow if the file is higher than 2GBytes */
-    pFileContext->file_size = iSize;
-
-    *pContext = pFileContext;
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function convert from UTF16 to UTF8
- * @param      pBufferIn: (IN) UTF16 input path
- * @param      pBufferOut: (OUT) UTF8 output path
- * @param      bufferOutSize: (IN/OUT) size of the output path
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: the output path size is not enough to contain
- *               the decoded path
- ************************************************************************
-*/
-#ifdef UTF_CONVERSION
-M4OSA_ERR M4OSA_ToUTF8_OSAL (M4OSA_Void   *pBufferIn, M4OSA_UInt8  *pBufferOut,
-                                                    M4OSA_UInt32 *bufferOutSize)
-{
-    M4OSA_UInt16 i;
-    wchar_t      *w_str = (wchar_t *) pBufferIn;
-    M4OSA_UInt32 len, size_needed, size_given;
-    if (pBufferIn == NULL)
-    {
-        *pBufferOut=NULL;
-        *bufferOutSize=1;
-    }
-    else
-    {
-        len         = wcslen(w_str);
-        size_needed = len+1;
-        size_given  = *bufferOutSize;
-
-       *bufferOutSize=size_needed;
-        if (size_given < size_needed )
-        {
-            return M4ERR_PARAMETER;
-        }
-        else
-        {
-            for (i=0; i<len; i++)
-            {
-                pBufferOut[i]=(M4OSA_UInt8)w_str[i];
-            }
-            pBufferOut[len]=0;
-        }
-    }
-    return M4NO_ERROR;
-}
-#endif /*UTF CONVERSION*/
-
-/**
- ************************************************************************
- * @brief      This function seeks at the provided position.
- * @param      context: (IN/OUT) Context of the core file reader
- * @param      seekMode: (IN) Seek access mode
- * @param      position: (IN/OUT) Position in the file
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- * @return     M4ERR_FILE_INVALID_POSITION: the position cannot be reached
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileCommonSeek(M4OSA_Context pContext,
-                               M4OSA_FileSeekAccessMode seekMode,
-                               M4OSA_FilePosition* pFilePos)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-    M4OSA_FilePosition fpos_current;
-    M4OSA_FilePosition fpos_seek;
-    M4OSA_FilePosition fpos_null = 0;
-    M4OSA_FilePosition fpos_neg_un = -1;
-    M4OSA_FilePosition fpos_file_size;
-    M4OSA_FilePosition fpos_seek_from_beginning;
-
-    M4OSA_TRACE3_3("M4OSA_fileCommonSeek\t\tM4OSA_Context 0x%x\t M4OSA_FileSeekAccessMode %d\tM4OSA_FilePosition* 0x%x",
-        pContext, seekMode, pFilePos);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER, "M4OSA_fileCommonSeek");
-    M4OSA_DEBUG_IF2(0 == seekMode, M4ERR_PARAMETER, "M4OSA_fileCommonSeek");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFilePos, M4ERR_PARAMETER, "M4OSA_fileCommonSeek");
-
-    fpos_file_size = pFileContext->file_size;
-
-    if(SeekRead == pFileContext->current_seek)
-    {
-        fpos_current = pFileContext->read_position;
-    }
-    else if(SeekWrite == pFileContext->current_seek)
-    {
-        fpos_current = pFileContext->write_position;
-    }
-    else
-    {
-        fpos_current = 0;
-    }
-
-    switch(seekMode)
-    {
-    case M4OSA_kFileSeekCurrent:
-        {
-            fpos_seek = *pFilePos;
-            break;
-        }
-    case M4OSA_kFileSeekBeginning:
-        {
-            fpos_seek = *pFilePos - fpos_current;
-            break;
-        }
-    case M4OSA_kFileSeekEnd:
-        {
-            fpos_seek = *pFilePos + fpos_file_size - fpos_current;
-            break;
-        }
-    default:
-        {
-            return M4ERR_PARAMETER;
-        }
-    }
-
-    fpos_seek_from_beginning = fpos_current + fpos_seek;
-
-    if(fseek(pFileContext->file_desc, fpos_seek, SEEK_CUR) != 0)
-    {
-        switch(errno)
-        {
-        case EINVAL:
-            {
-            /* meaning the value for origin is invalid or the position
-                specified by offset is before the beginning of the file */
-                return M4ERR_FILE_INVALID_POSITION;
-            }
-
-        case EBADF:
-        default:
-            {
-                return M4ERR_BAD_CONTEXT;/* file handle is invalid */
-            }
-        }
-    }
-
-    /* Set the returned position from the beginning of the file */
-    *pFilePos = fpos_seek_from_beginning;
-
-    /* SEEK done, reset end of file value */
-    pFileContext->b_is_end_of_file = M4OSA_FALSE;
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function asks to close the file (associated to the context)
- * @note       The context of the core file reader/writer is freed.
- * @param      context: (IN/OUT) Context of the core file reader
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- ************************************************************************
-*/
-
-M4OSA_ERR M4OSA_fileCommonClose(M4OSA_UInt16 core_id, M4OSA_Context pContext)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-    M4OSA_Int32 i32_err_code=0;
-
-    M4OSA_TRACE3_2("M4OSA_fileCommonClose\tM4OSA_UInt16 %d\tM4OSA_Context 0x%x",
-                                                             core_id, pContext);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext,
-              M4ERR_PARAMETER, "M4OSA_fileCommonClose: pContext is M4OSA_NULL");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
-                     "M4OSA_fileCommonClose: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    free(pFileContext->url_name);
-    pFileContext->url_name = M4OSA_NULL;
-
-    free(pFileContext->file_name);
-    pFileContext->file_name = M4OSA_NULL;
-
-    i32_err_code = fclose(pFileContext->file_desc);
-
-    pFileContext->file_desc = M4OSA_NULL;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreClose(pFileContext->semaphore_context);/* free the semaphore */
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    free(pFileContext);
-
-    if (i32_err_code != 0)
-    {
-        M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_fileCommonClose");
-        return M4ERR_BAD_CONTEXT;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function gets the file attributes (associated to the
- *             context)
- * @param      context: (IN) Context of the core file reader
- * @param      attribute: (OUT) The file attribute (allocated by the caller)
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileCommonGetAttribute(M4OSA_Context pContext, M4OSA_FileAttribute* pAttribute)
-{
-
-    M4OSA_FileContext* fileContext = pContext;
-
-    struct stat TheStat;
-
-    M4OSA_TRACE3_2("M4OSA_fileCommonGetAttribute\tM4OSA_Context 0x%x\t"
-        "M4OSA_FileAttribute* 0x%x", pContext, pAttribute);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext,        M4ERR_PARAMETER, "M4OSA_fileCommonGetAttribute");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pAttribute,    M4ERR_PARAMETER, "M4OSA_fileCommonGetAttribute");
-
-    if(stat((char*)fileContext->url_name, &TheStat) != 0)
-    {
-        M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_fileCommonGetAttribute");
-        return M4ERR_BAD_CONTEXT;
-    }
-
-    pAttribute->creationDate.time = (M4OSA_Time)TheStat.st_ctime;
-    pAttribute->lastAccessDate.time = (M4OSA_Time)TheStat.st_atime;
-    pAttribute->modifiedDate.time = (M4OSA_Time)TheStat.st_mtime;
-
-    pAttribute->creationDate.timeScale = 1;
-    pAttribute->lastAccessDate.timeScale = 1;
-    pAttribute->modifiedDate.timeScale = 1;
-
-    pAttribute->creationDate.referenceYear = 1970;
-    pAttribute->lastAccessDate.referenceYear = 1970;
-    pAttribute->modifiedDate.referenceYear = 1970;
-
-    pAttribute->modeAccess = fileContext->access_mode;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief      This function gets the file URL (associated to the context).
- * @note
- * @param      context: (IN) Context of the core file reader
- * @param      url: (OUT) The buffer containing the URL (allocated by
- *             M4OSA_fileCommonGetURL)
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileCommonGetURL(M4OSA_Context pContext, M4OSA_Char** pUrl)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-    M4OSA_UInt32    uiLength;
-
-    M4OSA_TRACE3_2("M4OSA_fileCommonGetURL\tM4OSA_Context 0x%x\tM4OSA_Char** 0x%x",
-                    pContext, pUrl);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext,    M4ERR_PARAMETER,
-                              "M4OSA_fileCommonGetURL: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pUrl,    M4ERR_PARAMETER,
-                                  "M4OSA_fileCommonGetURL: pUrl is M4OSA_NULL");
-
-    uiLength = strlen((const char *)pFileContext->url_name)+1;
-
-    /* Allocate the memory to store the url_name */
-    *pUrl = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(uiLength, M4OSA_FILE_COMMON,
-                                    (M4OSA_Char*)"M4OSA_fileCommonGetURL: url");
-    if(M4OSA_NULL == *pUrl)
-    {
-        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonGetURL");
-        return M4ERR_ALLOC;
-    }
-
-    M4OSA_chrNCopy(*pUrl, pFileContext->url_name, uiLength);
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function gets a string containing the file name associated
- *             to the input URL.
- * @note       The user should not forget to delete the output string using
- *             M4OSA_strDestroy
- * @param      pUrl:            (IN) The buffer containing the URL
- * @param      pFileName:    (OUT) The string containing the URL. It is
- *                            allocated inside this function
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
- *             file
- * @return     M4ERR_ALLOC: there is no more memory available
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileCommonGetFilename(M4OSA_Char* pUrl, M4OSA_Char** pFileName)
-{
-    M4OSA_Int32 i            = 0;
-    M4OSA_Int32 iUrlLen        = 0;
-    M4OSA_Int32 FileNameLen = 0;
-
-    M4OSA_Char* ptrUrl        = M4OSA_NULL;
-    M4OSA_Char* ptrFilename    = M4OSA_NULL;
-
-    M4OSA_TRACE3_2("M4OSA_fileCommonGetURL\tM4OSA_Char* %s\tM4OSA_Char** 0x%x",
-                                                               pUrl, pFileName);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pUrl,    M4ERR_PARAMETER,
-                             "M4OSA_fileCommonGetFilename: pUrl is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileName,    M4ERR_PARAMETER,
-                        "M4OSA_fileCommonGetFilename: pFileName is M4OSA_NULL");
-
-    *pFileName = M4OSA_NULL;
-
-    /*Parse URL*/
-    iUrlLen = strlen((const char *)pUrl);
-    for(i=iUrlLen-1; i>=0; i--)
-    {
-        if (pUrl[i] != '\\' && pUrl[i] != '/')
-        {
-            FileNameLen++;
-        }
-        else
-        {
-            break; /* find the beginning of the file name */
-        }
-    }
-
-    ptrFilename = (M4OSA_Char*) M4OSA_32bitAlignedMalloc(FileNameLen+1, M4OSA_FILE_COMMON,
-                    (M4OSA_Char*)"M4OSA_fileCommonGetFilename: Filename string");
-    if (ptrFilename == M4OSA_NULL)
-    {
-        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonGetFilename");
-        return M4ERR_ALLOC;
-    }
-
-    ptrUrl = pUrl + (iUrlLen - FileNameLen);
-    M4OSA_chrNCopy(ptrFilename, ptrUrl, FileNameLen+1);
-
-    *pFileName = ptrFilename;
-
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/osal/src/M4OSA_FileReader.c b/libvideoeditor/osal/src/M4OSA_FileReader.c
deleted file mode 100755
index 40a72f5..0000000
--- a/libvideoeditor/osal/src/M4OSA_FileReader.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-************************************************************************
- * @file         M4OSA_FileReader.c
- * @author       Cedric Lecoutre (cedric.lecoutre@philips.com)
- *               Laurent Fay (laurent.fay@philips.com)
- * @par Org:     Philips Digital Systems Laboratories - Paris (PDSL-P)
- * @brief        File reader for Android
- * @note         This file implements functions to read a file.
- ************************************************************************
-*/
-
-
-#include "M4OSA_Debug.h"
-#include "M4OSA_FileCommon_priv.h"
-#include "M4OSA_FileReader.h"
-#include "M4OSA_FileReader_priv.h"
-#include "M4OSA_Memory.h"
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-#include "M4OSA_Semaphore.h"
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-
-/**
-************************************************************************
-* @brief      This function opens the provided URL and returns its context.
-*             If an error occured, the context is set to NULL.
-* @param      context: (OUT) Context of the core file reader
-* @param      url: (IN) URL of the input file
-* @param      fileModeAccess: (IN) File mode access
-* @return     M4NO_ERROR: there is no error
-* @return     M4ERR_PARAMETER: at least one parameter is NULL
-* @return     M4ERR_ALLOC: there is no more memory available
-* @return     M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
-*             file
-* @return     M4ERR_FILE_NOT_FOUND: the file cannot be found
-* @return     M4ERR_FILE_LOCKED: the file is locked by an other
-*             application/process
-* @return     M4ERR_FILE_BAD_MODE_ACCESS: the file mode access is not correct
-************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadOpen(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
-                             M4OSA_UInt32 fileModeAccess)
-{
-    M4OSA_TRACE1_3("M4OSA_fileReadOpen : pC = 0x%p  fd = 0x%p  mode = %lu",
-                                     pContext, pFileDescriptor, fileModeAccess);
-
-    return M4OSA_fileCommonOpen(M4OSA_FILE_READER, pContext,
-                                               pFileDescriptor, fileModeAccess);
-}
-
-/**
-************************************************************************
-* @brief      This function reads the 'size' bytes in the core file reader
-*             (selected by its 'context') and writes the data to the 'data'
-*             pointer.
-* @note       If 'size' byte cannot be read in the core file reader, 'size'
-*             parameter is updated to match the correct
-* @note       number of read bytes.
-* @param      context: (IN/OUT) Context of the core file reader
-* @param      buffer: (OUT) Data pointer of the read data
-* @param      size: (IN/OUT) Size of the data to read (in bytes)
-* @return     M4NO_ERROR: there is no error
-* @return     M4ERR_PARAMETER: at least one parameter is NULL
-* @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
-* @return     M4ERR_ALLOC: there is no more memory available
-* @return     M4WAR_NO_DATA_YET: there is no enough data to fill the 'data'
-*             buffer, so the size parameter has been updated.
-************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadData(M4OSA_Context pContext, M4OSA_MemAddr8 data,
-                                                            M4OSA_UInt32* pSize)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-    M4OSA_ERR    err = M4NO_ERROR;
-    M4OSA_Int32    uiSizeRead;
-
-    M4OSA_TRACE2_2("M4OSA_fileReadData : data = 0x%p  size = %lu",
-                                    data, (M4OSA_NULL != pSize) ? (*pSize) : 0);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                  "M4OSA_fileReadData: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == data, M4ERR_PARAMETER,
-                                      "M4OSA_fileReadData: data is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pSize, M4ERR_PARAMETER,
-                                     "M4OSA_fileReadData: pSize is M4OSA_NULL");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
-      M4ERR_BAD_CONTEXT, "M4OSA_fileReadData: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if(M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess) /* read write */
-    {
-        uiSizeRead = fread(data, sizeof(M4OSA_Char), *pSize,
-                                                       pFileContext->file_desc);
-        if(-1 == uiSizeRead)
-        {
-            /* handle is invalid, or the file is not open for reading, or the file is locked */
-            *pSize = 0;
-            err = M4ERR_BAD_CONTEXT;
-        }
-        else
-        {
-            pFileContext->read_position = pFileContext->read_position + uiSizeRead;
-            if ((M4OSA_UInt32)uiSizeRead < *pSize)
-            {
-                *pSize = uiSizeRead;
-                /* This is the end of file */
-                pFileContext->b_is_end_of_file = M4OSA_TRUE;
-                err = M4WAR_NO_DATA_YET;
-            }
-            else
-            {
-                *pSize = uiSizeRead;
-            }
-        }
-
-        return err;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if(pFileContext->current_seek != SeekRead)
-    {
-        /* fseek to the last read position */
-        err = M4OSA_fileCommonSeek(pContext, M4OSA_kFileSeekBeginning,
-                                                &(pFileContext->read_position));
-        if(M4OSA_ERR_IS_ERROR(err))
-        {
-            M4OSA_DEBUG(err, "M4OSA_fileReadData: M4OSA_fileCommonSeek");
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return err;
-        }
-
-        pFileContext->current_seek = SeekRead;
-    }
-
-    /* Read data */
-    uiSizeRead = fread(data, sizeof(M4OSA_Char), *pSize,
-                                                       pFileContext->file_desc);
-    if(-1 == uiSizeRead)
-    {
-        /* handle is invalid, or the file is not open for reading,
-         or the file is locked */
-        *pSize = 0;
-        err = M4ERR_BAD_CONTEXT;
-    }
-    else
-    {
-        pFileContext->read_position = pFileContext->read_position + uiSizeRead;
-        if ((M4OSA_UInt32)uiSizeRead < *pSize)
-        {
-            *pSize = uiSizeRead;
-
-            /* This is the end of file */
-            pFileContext->b_is_end_of_file = M4OSA_TRUE;
-
-            err = M4WAR_NO_DATA_YET;
-        }
-        else
-        {
-            *pSize = uiSizeRead;
-        }
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-
-    return err;
-}
-
-
-/**
-************************************************************************
- * @brief      This function seeks at the provided position in the core file
- *             reader (selected by its 'context'). The position is related to
- *             the seekMode parameter it can be either from the beginning, from
- *             the end or from the current postion. To support large file
- *             access (more than 2GBytes), the position is provided on a 64
- *             bits.
- * @note       If this function returns an error the current position pointer
- *             in the file must not change. Else the current
- *             position pointer must be updated.
- * @param      context: (IN/OUT) Context of the core file reader
- * @param      seekMode: (IN) Seek access mode
- * @param      position: (IN/OUT) Position in the file
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- * @return     M4ERR_FILE_INVALID_POSITION: the position cannot be reached
- ************************************************************************
-*/
-
-M4OSA_ERR M4OSA_fileReadSeek(M4OSA_Context pContext, M4OSA_FileSeekAccessMode seekMode,
-                             M4OSA_FilePosition* pPosition)
-{
-    M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_2("M4OSA_fileReadSeek : mode = %d  pos = %lu", seekMode,
-                                  (pPosition != M4OSA_NULL) ? (*pPosition) : 0);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                  "M4OSA_fileReadSeek: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(0 == seekMode, M4ERR_PARAMETER,
-                                           "M4OSA_fileReadSeek: seekMode is 0");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pPosition, M4ERR_PARAMETER,
-                                 "M4OSA_fileReadSeek: pPosition is M4OSA_NULL");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
-      M4ERR_BAD_CONTEXT, "M4OSA_fileReadSeek: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if (M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess)
-    {
-         M4OSA_UInt32    SeekModeOption;
-         /* Go to the desired position */
-        if (M4OSA_kFileSeekBeginning == seekMode)
-        {
-            SeekModeOption = SEEK_SET;
-        }
-        else if (M4OSA_kFileSeekEnd == seekMode)
-        {
-            SeekModeOption = SEEK_END;
-        }
-        else if (M4OSA_kFileSeekCurrent == seekMode)
-        {
-            SeekModeOption = SEEK_CUR;
-        }
-        else
-        {
-            M4OSA_TRACE1_0("M4OSA_fileReadSeek: END WITH ERROR !!! (CONVERION ERROR FOR THE SEEK MODE)");
-            return M4ERR_PARAMETER;
-        }
-
-        /**
-         * Go to the desired position */
-        err = fseek(pFileContext->file_desc, *pPosition, SeekModeOption);
-        if(err != 0)
-        {
-            /* converts the error to PSW format*/
-            err=((M4OSA_UInt32)(M4_ERR)<<30)+(((M4OSA_FILE_WRITER)&0x003FFF)<<16)+(M4OSA_Int16)(err);
-            M4OSA_TRACE1_1("M4OSA_FileReadSeek error:%x",err);
-        }
-        else
-        {
-            return M4NO_ERROR;
-        }
-
-        /* Return without error */
-        return err;
-    }
-
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if(pFileContext->current_seek != SeekRead)
-    {
-
-        /* fseek to the last read position */
-        err = M4OSA_fileCommonSeek(pContext, M4OSA_kFileSeekBeginning,
-                                                &(pFileContext->read_position));
-        if(M4OSA_ERR_IS_ERROR(err))
-        {
-            M4OSA_DEBUG(err, "M4OSA_fileReadData: M4OSA_fileCommonSeek");
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return err;
-        }
-
-        pFileContext->current_seek = SeekRead;
-    }
-
-    err = M4OSA_fileCommonSeek(pContext, seekMode, pPosition);
-    if(M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_DEBUG(err, "M4OSA_fileReadData: M4OSA_fileCommonSeek");
-    }
-    else
-    {
-        pFileContext->read_position = *pPosition;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    return err;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function asks the core file reader to close the file
- *             (associated to the context).
- * @note       The context of the core file reader is freed.
- * @param      pContext: (IN/OUT) Context of the core file reader
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadClose(M4OSA_Context pContext)
-{
-    M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
-
-    M4OSA_TRACE1_1("M4OSA_fileReadClose : pC = 0x%p", pContext);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                 "M4OSA_fileReadClose: pContext is M4OSA_NULL");
-
-    if(M4OSA_FILE_WRITER == pFileContext->coreID_write)
-    {
-        return M4NO_ERROR;
-    }
-
-    return M4OSA_fileCommonClose(M4OSA_FILE_READER, pContext);
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This function asks the core file reader to return the value
- *             associated with the optionID. The caller is responsible for
- *             allocating/de-allocating the memory of the value field.
- * @note       'value' must be cast according to the type related to the
- *             optionID As the caller is responsible for
- *             allocating/de-allocating the 'value' field, the callee must copy
- *             this field to its internal variable.
- * @param      pContext: (IN/OUT) Context of the core file reader
- * @param      pOptionID: (IN) ID of the option
- * @param      pOptionValue: (OUT) Value of the option
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_BAD_OPTION_ID: the optionID is not a valid one
- * @return     M4ERR_WRITE_ONLY: this option is a write only one
- * @return     M4ERR_NOT_IMPLEMENTED: this option is not implemented
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadGetOption(M4OSA_Context pContext, M4OSA_FileReadOptionID optionID,
-                                  M4OSA_DataOption* pOptionValue)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-
-    M4OSA_TRACE2_1("M4OSA_fileReadGetOption : option = 0x%x", optionID);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                             "M4OSA_fileReadGetOption: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(optionID == 0, M4ERR_PARAMETER,
-                                      "M4OSA_fileReadGetOption: optionID is 0");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pOptionValue, M4ERR_PARAMETER,
-                         "M4OSA_fileReadGetOption: pOptionValue is M4OSA_NULL");
-
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_READER),
-                                M4ERR_BAD_OPTION_ID, "M4OSA_fileReadGetOption");
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_READABLE(optionID),
-                                   M4ERR_WRITE_ONLY, "M4OSA_fileReadGetOption");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
-                                  M4ERR_BAD_CONTEXT,
-                                  "M4OSA_fileReadGetOption: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    switch(optionID)
-    {
-#if(M4OSA_OPTIONID_FILE_READ_GET_FILE_POSITION == M4OSA_TRUE)
-    case M4OSA_kFileReadGetFilePosition:
-        {
-            M4OSA_FilePosition* pPosition = (M4OSA_FilePosition*)pOptionValue;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            *pPosition = pFileContext->read_position;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return M4NO_ERROR;
-        }
-#endif /*M4OSA_OPTIONID_FILE_READ_GET_FILE_POSITION*/
-
-#if(M4OSA_OPTIONID_FILE_READ_IS_EOF == M4OSA_TRUE)
-    case M4OSA_kFileReadIsEOF:
-        {
-            M4OSA_Bool* bIsEndOfFile = (M4OSA_Bool*)pOptionValue;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            *bIsEndOfFile = pFileContext->b_is_end_of_file;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return M4NO_ERROR;
-        }
-#endif /*M4OSA_OPTIONID_FILE_READ_IS_EOF*/
-
-
-#if(M4OSA_OPTIONID_FILE_READ_GET_FILE_SIZE == M4OSA_TRUE)
-    case M4OSA_kFileReadGetFileSize:
-        {
-            M4OSA_FilePosition* pPosition = (M4OSA_FilePosition*)pOptionValue;
-            M4OSA_Int32 iSavePos    = 0;
-            M4OSA_Int32 iSize        = 0;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-            /**
-            * Bugfix: update the file size.
-            * When a file is in read mode, may be another application is writing in.
-            * So, we have to update the file size */
-            iSavePos = ftell(pFileContext->file_desc);            /*1- Check the first position */
-            fseek(pFileContext->file_desc, 0, SEEK_END);        /*2- Go to the end of the file */
-            iSize = ftell(pFileContext->file_desc);                /*3- Check the file size*/
-            fseek(pFileContext->file_desc, iSavePos, SEEK_SET);    /*4- go to the first position*/
-            pFileContext->file_size = iSize;
-
-            *pPosition = pFileContext->file_size;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return M4NO_ERROR;
-        }
-#endif /*M4OSA_OPTIONID_FILE_READ_GET_FILE_SIZE*/
-
-#if(M4OSA_OPTIONID_FILE_READ_GET_FILE_ATTRIBUTE == M4OSA_TRUE)
-    case M4OSA_kFileReadGetFileAttribute:
-        {
-            return M4OSA_fileCommonGetAttribute(pContext,
-                                            (M4OSA_FileAttribute*)pOptionValue);
-        }
-#endif /*M4OSA_OPTIONID_FILE_READ_GET_FILE_ATTRIBUTE*/
-
-#if(M4OSA_OPTIONID_FILE_READ_GET_URL == M4OSA_TRUE)
-    case M4OSA_kFileReadGetURL:
-        {
-            return M4OSA_fileCommonGetURL(pContext, (M4OSA_Char**)pOptionValue);
-        }
-#endif /*M4OSA_OPTIONID_FILE_READ_GET_URL*/
-
-        case M4OSA_kFileReadLockMode:
-        {
-            *(M4OSA_UInt32*)pOptionValue = pFileContext->m_uiLockMode;
-            return M4NO_ERROR;
-        }
-    }
-
-    M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileReadGetOption");
-
-    return M4ERR_NOT_IMPLEMENTED;
-}
-
-/**
- ************************************************************************
- * @fn         M4OSA_ERR M4OSA_fileReadSetOption (M4OSA_Context context,
- *                       M4OSA_OptionID optionID, M4OSA_DataOption optionValue))
- * @brief      This function asks the core file reader to set the value associated with the optionID.
- *             The caller is responsible for allocating/de-allocating the memory of the value field.
- * @note       As the caller is responsible for allocating/de-allocating the 'value' field, the callee must copy this field
- *             to its internal variable.
- * @param      pContext: (IN/OUT) Context of the core file reader
- * @param      optionID: (IN) ID of the option
- * @param      value: (IN) Value of the option
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_BAD_OPTION_ID: the optionID is not a valid one
- * @return     M4ERR_READ_ONLY: this option is a read only one
- * @return     M4ERR_NOT_IMPLEMENTED: this option is not implemented
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileReadSetOption(M4OSA_Context pContext,
-                                  M4OSA_FileReadOptionID optionID,
-                                  M4OSA_DataOption optionValue)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-
-    M4OSA_TRACE2_1("M4OSA_fileReadSetOption : option = 0x%x", optionID);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                             "M4OSA_fileReadSetOption: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(0 == optionID, M4ERR_PARAMETER,
-                                                     "M4OSA_fileReadSetOption");
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_READER),
-                                M4ERR_BAD_OPTION_ID, "M4OSA_fileReadSetOption");
-
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_WRITABLE(optionID),
-                                    M4ERR_READ_ONLY, "M4OSA_fileReadSetOption");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
-                                  M4ERR_BAD_CONTEXT,
-                                  "M4OSA_fileReadSetOption: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    switch(optionID)
-    {
-        case M4OSA_kFileReadLockMode:
-        {
-            pFileContext->m_uiLockMode= (M4OSA_UInt32)*(M4OSA_UInt32*)optionValue;
-            return M4NO_ERROR;
-        }
-        default:
-            M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileReadSetOption");
-            return M4ERR_NOT_IMPLEMENTED;
-    }
-}
-
diff --git a/libvideoeditor/osal/src/M4OSA_FileWriter.c b/libvideoeditor/osal/src/M4OSA_FileWriter.c
deleted file mode 100755
index 37fc173..0000000
--- a/libvideoeditor/osal/src/M4OSA_FileWriter.c
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_FileWriter.c
- * @brief        File writer for Android
- * @note         This file implements functions to write in a file.
- ************************************************************************
-*/
-
-#include "M4OSA_Debug.h"
-#include "M4OSA_FileCommon_priv.h"
-#include "M4OSA_FileWriter.h"
-#include "M4OSA_FileWriter_priv.h"
-#include "M4OSA_Memory.h"
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-#include "M4OSA_Semaphore.h"
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-/**
- ************************************************************************
- * @brief      This function opens the provided URL and returns its context.
- *             If an error occured, the context is set to NULL.
- * @param      pContext: (OUT) Context of the core file writer
- * @param      pUrl: (IN) URL of the input file
- * @param      fileModeAccess: (IN) File mode access
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_ALLOC: there is no more memory available
- * @return     M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
- *             file
- * @return     M4ERR_FILE_NOT_FOUND: the file cannot be found
- * @return     M4ERR_FILE_LOCKED: the file is locked by an other
- *             application/process
- * @return     M4ERR_FILE_BAD_MODE_ACCESS: the file mode access is not correct
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileWriteOpen(M4OSA_Context* pContext, M4OSA_Void* pUrl,
-                              M4OSA_UInt32 fileModeAccess)
-{
-    M4OSA_TRACE1_3("M4OSA_fileWriteOpen : pC = 0x%p  fd = 0x%p  mode = %d",
-                                                pContext, pUrl, fileModeAccess);
-
-    return M4OSA_fileCommonOpen(M4OSA_FILE_WRITER, pContext, pUrl,
-                                fileModeAccess);
-}
-
-
-/**
- ************************************************************************
- * @brief      This function writes the 'size' bytes stored at 'data' memory
- *             in the file selected by its context.
- * @note       The caller is responsible for allocating/de-allocating the
- *             memory for 'data' parameter.
- * @note       Moreover the data pointer must be allocated to store at least
- *             'size' bytes.
- * @param      pContext: (IN/OUT) Context of the core file reader
- * @param      buffer: (IN) Data pointer of the write data
- * @param      size: (IN) Size of the data to write (in bytes)
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileWriteData(M4OSA_Context pContext, M4OSA_MemAddr8 data,
-                              M4OSA_UInt32 uiSize)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-    M4OSA_ERR err;
-    M4OSA_UInt32 uiSizeWrite;
-
-    M4OSA_TRACE2_2("M4OSA_fileWriteData : data = 0x%p  size = %lu", data,
-                                                                        uiSize);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                 "M4OSA_fileWriteData: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == data, M4ERR_PARAMETER,
-                                     "M4OSA_fileWriteData: data is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(0 == uiSize, M4ERR_PARAMETER,
-                                            "M4OSA_fileWriteData: uiSize is 0");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
-                                  M4ERR_BAD_CONTEXT,
-                                  "M4OSA_fileWriteData: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if (M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess)
-    {
-        M4OSA_UInt32    WriteSize;
-        err = M4NO_ERROR;
-        WriteSize = fwrite((void *)data,1, uiSize, pFileContext->file_desc);
-        if(WriteSize != uiSize)
-        {
-            /* converts the error to PSW format*/
-            err = ((M4OSA_UInt32)(M4_ERR)<<30)+(((M4OSA_FILE_WRITER)&0x003FFF)<<16)+(M4OSA_Int16)(WriteSize);
-            M4OSA_TRACE1_1("M4OSA_FileWriteData error:%x",err);
-        }
-        fflush(pFileContext->file_desc);
-
-        pFileContext->write_position = pFileContext->write_position + WriteSize;
-
-        /* Update the file size */
-        if(pFileContext->write_position > pFileContext->file_size)
-        {
-            pFileContext->file_size = pFileContext->write_position;
-        }
-        return err;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if(pFileContext->current_seek != SeekWrite)
-    {
-        /* fseek to the last read position */
-        err = M4OSA_fileCommonSeek(pContext, M4OSA_kFileSeekBeginning,
-            &(pFileContext->write_position));
-
-        if(M4OSA_ERR_IS_ERROR(err))
-        {
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-            M4OSA_DEBUG(err, "M4OSA_fileWriteData: M4OSA_fileCommonSeek");
-            return err;
-        }
-
-        pFileContext->current_seek = SeekWrite;
-    }
-
-    /* Write data */
-    uiSizeWrite = fwrite(data, sizeof(M4OSA_Char), uiSize, pFileContext->file_desc);
-
-    if(uiSizeWrite == (M4OSA_UInt32)-1)
-    {
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-        M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-        /* An error occured */
-
-        M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_fileWriteData: fwrite failed");
-        return M4ERR_BAD_CONTEXT;
-    }
-
-    pFileContext->write_position = pFileContext->write_position + uiSizeWrite;
-
-    /* Update the file size */
-    if(pFileContext->write_position > pFileContext->file_size)
-    {
-        pFileContext->file_size = pFileContext->write_position;
-    }
-
-    if((M4OSA_UInt32)uiSizeWrite < uiSize)
-    {
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-        M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileWriteData");
-        return M4ERR_ALLOC;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function seeks at the provided position in the core file
- *             writer (selected by its 'context'). The position is related to
- *             the seekMode parameter it can be either from the beginning,
- *             from the end or from the current postion. To support large file
- *             access (more than 2GBytes), the position is provided on a 64
- *             bits.
- * @note       If this function returns an error the current position pointer
- *             in the file must not change. Else the current position pointer
- *             must be updated.
- * @param      pContext: (IN/OUT) Context of the core file reader
- * @param      seekMode: (IN) Seek access mode
- * @param      position: (IN/OUT) Position in the file
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
- * @return     M4ERR_FILE_INVALID_POSITION: the position cannot be reached
- ************************************************************************
-                              */
-M4OSA_ERR M4OSA_fileWriteSeek(M4OSA_Context pContext, M4OSA_FileSeekAccessMode seekMode,
-                              M4OSA_FilePosition* pPosition)
-{
-    M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_2("M4OSA_fileWriteSeek : mode = %d  pos = %lu",
-                        seekMode, (M4OSA_NULL != pPosition) ? (*pPosition) : 0);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                 "M4OSA_fileWriteSeek: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(0 == seekMode, M4ERR_PARAMETER,
-                                          "M4OSA_fileWriteSeek: seemMode is 0");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pPosition, M4ERR_PARAMETER,
-                                "M4OSA_fileWriteSeek: pPosition is M4OSA_NULL");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
-                        "M4OSA_fileWriteSeek: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if (M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess) /* read write */
-    {
-         M4OSA_UInt32    SeekModeOption;
-        /*The position for the seek mode between the SHP and the OSAl part are different */
-        if (M4OSA_kFileSeekBeginning == seekMode)
-        {
-            SeekModeOption = SEEK_SET;
-        }
-        else if (M4OSA_kFileSeekEnd == seekMode)
-        {
-            SeekModeOption = SEEK_END;
-        }
-        else if (M4OSA_kFileSeekCurrent == seekMode)
-        {
-            SeekModeOption = SEEK_CUR;
-        }
-        else
-        {
-            M4OSA_TRACE1_0("M4OSA_fileWriteSeek: END WITH ERROR !!! (CONVERION ERROR FOR THE SEEK MODE) ");
-            return M4ERR_PARAMETER;
-        }
-
-        /**
-         * Go to the desired position */
-        err = fseek(pFileContext->file_desc,*pPosition,SeekModeOption);
-        if(err != 0)
-        {
-            /* converts the error to PSW format*/
-            err=((M4OSA_UInt32)(M4_ERR)<<30)+(((M4OSA_FILE_WRITER)&0x003FFF)<<16)+(M4OSA_Int16)(err);
-            M4OSA_TRACE1_1("M4OSA_FileWriteSeek error:%x",err);
-        }
-        else
-        {
-            return M4NO_ERROR;
-        }
-
-        return err;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    err = M4OSA_fileCommonSeek(pContext, seekMode, pPosition);
-
-    if(M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_DEBUG(err, "M4OSA_fileWriteSeek: M4OSA_fileCommonSeek");
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-        M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-        return err;
-    }
-
-    pFileContext->write_position = *pPosition;
-
-    pFileContext->current_seek = SeekWrite;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function asks the core file writer to close the file
- *             (associated to the context).
- * @note       The context of the core file writer is freed.
- * @param      pContext: (IN/OUT) Context of the core file writer
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_ALLOC: there is no more memory available
-************************************************************************
-*/
-
-M4OSA_ERR M4OSA_fileWriteClose(M4OSA_Context pContext)
-{
-    M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
-
-    M4OSA_TRACE1_1("M4OSA_fileWriteClose : pC = 0x%p", pContext);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                "M4OSA_fileWriteClose: pContext is M4OSA_NULL");
-
-    return M4OSA_fileCommonClose(M4OSA_FILE_WRITER, pContext);
-}
-
-
-/**
- ************************************************************************
- * @brief      This function flushes the stream associated to the context.
- * @param      pContext: (IN/OUT) Context of the core file writer
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_fileWriteFlush(M4OSA_Context pContext)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-    M4OSA_ERR    err = M4NO_ERROR;
-
-    M4OSA_TRACE2_1("M4OSA_fileWriteFlush : pC = 0x%p", pContext);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                "M4OSA_fileWriteFlush: pcontext is M4OSA_NULL");
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
-                       "M4OSA_fileWriteFlush: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    if (fflush(pFileContext->file_desc) != 0)
-    {
-        err = M4ERR_BAD_CONTEXT;
-    }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    return err;
-}
-
-
-/**
- ************************************************************************
- * @brief      This function asks the core file writer to return the value
- *             associated with the optionID.
- *             The caller is responsible for allocating/de-allocating the
- *             memory of the value field.
- * @note       'value' must be cast according to the type related to the
- *             optionID
- *             As the caller is responsible for allocating/de-allocating the
- *             'value' field, the callee must copy this field
- *             to its internal variable.
- * @param      pContext: (IN/OUT) Context of the core file writer
- * @param      optionID: (IN) ID of the option
- * @param      value: (OUT) Value of the option
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_BAD_OPTION_ID: the optionID is not a valid one
- * @return     M4ERR_WRITE_ONLY: this option is a write only one
- * @return     M4ERR_NOT_IMPLEMENTED: this option is not implemented
-************************************************************************
-*/
-
-M4OSA_ERR M4OSA_fileWriteGetOption(M4OSA_Context pContext, M4OSA_OptionID optionID,
-                                   M4OSA_DataOption* pOptionValue)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-
-    M4OSA_TRACE2_1("M4OSA_fileWriteGetOption : option = 0x%x", optionID);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                            "M4OSA_fileWriteGetOption: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(optionID == 0, M4ERR_PARAMETER, "M4OSA_fileWriteGetOption");
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pOptionValue, M4ERR_PARAMETER,
-                         "M4OSA_fileWriteGetOption: pOtionValue is M4OSA_NULL");
-
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_WRITER),
-                               M4ERR_BAD_OPTION_ID, "M4OSA_fileWriteGetOption");
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_READABLE(optionID), M4ERR_WRITE_ONLY,
-                                                    "M4OSA_fileWriteGetOption");
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
-                   "M4OSA_fileWriteGetOption: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    switch(optionID)
-    {
-#if(M4OSA_OPTIONID_FILE_WRITE_GET_FILE_POSITION == M4OSA_TRUE)
-    case M4OSA_kFileWriteGetFilePosition:
-        {
-            M4OSA_FilePosition* position = (M4OSA_FilePosition*)pOptionValue;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            *position = pFileContext->write_position;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return M4NO_ERROR;
-        }
-#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_FILE_POSITION*/
-
-#if(M4OSA_OPTIONID_FILE_WRITE_GET_FILE_SIZE == M4OSA_TRUE)
-    case M4OSA_kFileWriteGetFileSize:
-        {
-            M4OSA_FilePosition* position = (M4OSA_FilePosition*)pOptionValue;
-
-            if(M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess)
-            {
-                M4OSA_Int32 iSavePos    = 0;
-                M4OSA_Int32 iSize        = 0;
-
-                iSavePos = ftell(pFileContext->file_desc);            /*1- Check the first position */
-                fseek(pFileContext->file_desc, 0, SEEK_END);        /*2- Go to the end of the file */
-                *position = ftell(pFileContext->file_desc);            /*3- Check the file size*/
-                fseek(pFileContext->file_desc, iSavePos, SEEK_SET);    /*4- go to the first position*/
-                return M4NO_ERROR;
-            }
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            *position = pFileContext->file_size;
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-            M4OSA_semaphorePost(pFileContext->semaphore_context);
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-            return M4NO_ERROR;
-        }
-#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_FILE_SIZE*/
-
-#if(M4OSA_OPTIONID_FILE_WRITE_GET_URL == M4OSA_TRUE)
-    case M4OSA_kFileWriteGetURL:
-        {
-            return M4OSA_fileCommonGetURL (pContext, (M4OSA_Char**)pOptionValue);
-        }
-#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_URL*/
-
-#if(M4OSA_OPTIONID_FILE_WRITE_GET_FILE_ATTRIBUTE == M4OSA_TRUE)
-    case M4OSA_kFileWriteGetAttribute:
-        {
-            return M4OSA_fileCommonGetAttribute(pContext,
-                (M4OSA_FileAttribute*)pOptionValue);
-        }
-#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_FILE_ATTRIBUTE*/
-
-#if(M4OSA_OPTIONID_FILE_WRITE_GET_READER_CONTEXT == M4OSA_TRUE)
-    case M4OSA_kFileWriteGetReaderContext:
-        {
-            M4OSA_FileModeAccess access = pFileContext->access_mode;
-
-            M4OSA_DEBUG_IF1(!(access & M4OSA_kFileRead), M4ERR_BAD_CONTEXT,
-                "M4OSA_fileWriteGetOption: M4OSA_kFileRead");
-
-            M4OSA_DEBUG_IF1(!(access & M4OSA_kFileWrite), M4ERR_BAD_CONTEXT,
-                "M4OSA_fileWriteGetOption: M4OSA_kFileWrite");
-
-            pFileContext->coreID_read = M4OSA_FILE_READER;
-
-            *pOptionValue = pContext;
-
-            return M4NO_ERROR;
-        }
-#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_READER_CONTEXT*/
-
-    case M4OSA_kFileWriteLockMode:
-        {
-            *(M4OSA_UInt32*)pOptionValue = pFileContext->m_uiLockMode;
-            return M4NO_ERROR;
-        }
-
-    }
-
-    M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileWriteGetOption");
-
-    return M4ERR_NOT_IMPLEMENTED;
-}
-
-
-/**
-************************************************************************
-* @brief      This function asks the core file writer to set the value
-*             associated with the optionID.
-*             The caller is responsible for allocating/de-allocating the
-*             memory of the value field.
-* @note       As the caller is responsible for allocating/de-allocating the
-*             'value' field, the callee must copy this field to its internal
-*             variable.
-* @param      pContext: (IN/OUT) Context of the core file writer
-* @param      optionID: (IN) ID of the option
-* @param      value: (IN) Value of the option
-* @return     M4NO_ERROR: there is no error
-* @return     M4ERR_PARAMETER: at least one parameter is NULL
-* @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
-* @return     M4ERR_BAD_OPTION_ID: the optionID is not a valid one
-* @return     M4ERR_READ_ONLY: this option is a read only one
-* @return     M4ERR_NOT_IMPLEMENTED: this option is not implemented
-************************************************************************
-*/
-
-M4OSA_ERR M4OSA_fileWriteSetOption(M4OSA_Context pContext,
-                                   M4OSA_OptionID optionID,
-                                   M4OSA_DataOption optionValue)
-{
-    M4OSA_FileContext* pFileContext = pContext;
-
-    M4OSA_TRACE2_1("M4OSA_fileWriteSetOption : option = 0x%x", optionID);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                                    "M4OSA_fileWriteSetOption");
-
-    M4OSA_DEBUG_IF2(0 == optionID, M4ERR_PARAMETER, "M4OSA_fileWriteSetOption");
-
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_WRITER),
-        M4ERR_BAD_OPTION_ID, "M4OSA_fileWriteSetOption");
-
-    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_WRITABLE(optionID), M4ERR_READ_ONLY,
-                                                     "M4OSA_fileReadSetOption");
-
-#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
-                   "M4OSA_fileWriteSetOption: semaphore_context is M4OSA_NULL");
-#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
-
-    switch(optionID)
-    {
-        case M4OSA_kFileWriteLockMode:
-        {
-            pFileContext->m_uiLockMode = (M4OSA_UInt32)*(M4OSA_UInt32*)optionValue;
-            return M4NO_ERROR;
-        }
-
-        case M4OSA_kFileWriteDescMode:
-        {
-            pFileContext->m_DescrModeAccess = (M4OSA_Int32)*(M4OSA_Int32*)optionValue;
-            return M4NO_ERROR;
-        }
-
-        default:
-            return M4ERR_NOT_IMPLEMENTED;
-    }
-
-    return M4ERR_NOT_IMPLEMENTED;
-}
-
diff --git a/libvideoeditor/osal/src/M4OSA_Mutex.c b/libvideoeditor/osal/src/M4OSA_Mutex.c
deleted file mode 100755
index bbe6bba..0000000
--- a/libvideoeditor/osal/src/M4OSA_Mutex.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @brief        Mutex for Android
- * @note         This file implements functions to manipulate mutex
- ************************************************************************
-*/
-
-#include "M4OSA_Debug.h"
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Mutex.h"
-
-#include <pthread.h>
-#include <errno.h>
-
-
-/* Context for the mutex */
-typedef struct
-{
-   M4OSA_UInt32     coreID;               /* mutex context identifiant */
-   pthread_mutex_t  mutex;                /* mutex */
-   pthread_t        threadOwnerID;        /* thread owner identifiant */
-} M4OSA_MutexContext;
-
-
-
-/**
- ************************************************************************
- * @brief      This method creates a new mutex.
- * @note       This function creates and allocates a unique context. It's the
- *             OSAL real time responsibility for managing its context. It must
- *             be freed by the M4OSA_mutexClose function. The context parameter
- *             will be sent back to any OSAL core mutex functions to allow
- *             retrieving data associated to the opened mutex.
- * @param      pContext:(OUT) Context of the created mutex
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_ALLOC: there is no more available memory
- * @return     M4ERR_CONTEXT_FAILED: the context creation failed
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_mutexOpen(M4OSA_Context* pContext)
-{
-    M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)M4OSA_NULL;
-    pthread_mutexattr_t attribute = { 0 };
-    M4OSA_Bool opened = M4OSA_FALSE;
-
-    M4OSA_TRACE1_1("M4OSA_mutexOpen\t\tM4OSA_Context* 0x%x", pContext);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
-                                     "M4OSA_mutexOpen: pContext is M4OSA_NULL");
-
-    *pContext = M4OSA_NULL;
-
-    pMutexContext = (M4OSA_MutexContext*)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_MutexContext),
-                    M4OSA_MUTEX, (M4OSA_Char*)"M4OSA_mutexOpen: mutex context");
-
-    if(M4OSA_NULL == pMutexContext)
-    {
-        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_mutexOpen");
-        return M4ERR_ALLOC;
-    }
-
-    /* Initialize the mutex attribute. */
-    if ( 0 == pthread_mutexattr_init( &attribute ) )
-    {
-        /* Initialize the mutex type. */
-        if ( 0 == pthread_mutexattr_settype( &attribute, PTHREAD_MUTEX_RECURSIVE ) )
-        {
-            /* Initialize the mutex. */
-            if (0 == pthread_mutex_init( &pMutexContext->mutex, &attribute ) )
-            {
-                opened = M4OSA_TRUE;
-            }
-        }
-
-        /* Destroy the mutex attribute. */
-        pthread_mutexattr_destroy( &attribute );
-    }
-
-    if(!opened)
-    {
-        M4OSA_DEBUG(M4ERR_CONTEXT_FAILED, "M4OSA_mutexOpen: OS mutex creation failed");
-        free(pMutexContext);
-        return M4ERR_CONTEXT_FAILED ;
-    }
-
-    pMutexContext->coreID = M4OSA_MUTEX;
-
-    pMutexContext->threadOwnerID = 0;
-
-    *pContext = (M4OSA_Context) pMutexContext;
-
-    return M4NO_ERROR;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method locks the mutex. "Context" identifies the mutex.
- * @note       If the mutex is already locked, the calling thread blocks until
- *             the mutex becomes available (by calling M4OSA_mutexUnlock) or
- *             "timeout" is reached. This is a blocking call.
- * @param      context:(IN/OUT) Context of the mutex
- * @param      timeout:(IN) Time out in milliseconds
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4WAR_TIME_OUT: time out is elapsed before mutex has been
- *             available
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_mutexLock(M4OSA_Context context, M4OSA_UInt32 timeout)
-{
-    M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)context;
-    pthread_t           currentThread;
-    int                 result;
-    struct timespec     ts;
-    struct timespec     left;
-
-    M4OSA_TRACE1_2("M4OSA_mutexLock\t\tM4OSA_Context 0x%x\tM4OSA_UInt32 %d",
-        context, timeout);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == context, M4ERR_PARAMETER,
-                                      "M4OSA_mutexLock: context is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(pMutexContext->coreID != M4OSA_MUTEX,
-                                          M4ERR_BAD_CONTEXT, "M4OSA_mutexLock");
-
-    currentThread = pthread_self();
-
-    if(pMutexContext ->threadOwnerID == currentThread)
-    {
-        M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexLock: Thread tried to lock a mutex it already owns");
-        return M4ERR_BAD_CONTEXT ;
-    }
-
-    /* Lock the mutex. */
-    if ( M4OSA_WAIT_FOREVER == timeout)
-    {
-        if ( 0 != pthread_mutex_lock(&pMutexContext->mutex) )
-        {
-            M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexLock: OS mutex wait failed");
-            return M4ERR_BAD_CONTEXT;
-        }
-    }
-    else
-    {
-        result = pthread_mutex_trylock(&pMutexContext->mutex);
-        while ( ( EBUSY == result ) && ( 0 < timeout ) )
-        {
-            ts.tv_sec  = 0;
-            if (1 <= timeout)
-            {
-                ts.tv_nsec = 1000000;
-                timeout -= 1;
-            }
-            else
-            {
-                ts.tv_nsec = timeout * 1000000;
-                timeout = 0;
-            }
-            nanosleep(&ts, &left);
-            result = pthread_mutex_trylock(&pMutexContext->mutex);
-        }
-        if (0 != result)
-        {
-            if (EBUSY == result)
-            {
-                return M4WAR_TIME_OUT;
-            }
-            else
-            {
-                M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexLock: OS mutex wait failed");
-                return M4ERR_BAD_CONTEXT;
-            }
-        }
-    }
-
-    pMutexContext->threadOwnerID = currentThread;
-
-    return M4NO_ERROR;
-}
-
-
-
-/**
- ************************************************************************
- * @brief      This method unlocks the mutex. The mutex is identified by
- *             its context
- * @note       The M4OSA_mutexLock unblocks the thread with the highest
- *             priority and made it ready to run.
- * @note       No hypotheses can be made on which thread will be un-blocked
- *             between threads with the same priority.
- * @param      context:(IN/OUT) Context of the mutex
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
-************************************************************************
-*/
-M4OSA_ERR M4OSA_mutexUnlock(M4OSA_Context context)
-{
-    M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)context;
-    pthread_t currentThread;
-
-    M4OSA_TRACE1_1("M4OSA_mutexUnlock\t\tM4OSA_Context 0x%x", context);
-    M4OSA_DEBUG_IF2(M4OSA_NULL == context, M4ERR_PARAMETER,
-                                    "M4OSA_mutexUnlock: context is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(M4OSA_MUTEX != pMutexContext->coreID,
-                                        M4ERR_BAD_CONTEXT, "M4OSA_mutexUnlock");
-
-    currentThread = pthread_self();
-
-    if(pMutexContext->threadOwnerID != currentThread)
-    {
-        M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexUnlock: Thread tried to unlock a mutex it doesn't own");
-        return M4ERR_BAD_CONTEXT;
-    }
-
-    pMutexContext->threadOwnerID = 0 ;
-
-    pthread_mutex_unlock(&pMutexContext->mutex);
-
-    return M4NO_ERROR;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method deletes a mutex (identify by its context). After
- *             this call, the mutex and its context is no more useable. This
- *             function frees all the memory related to this mutex.
- * @note       It is an application issue to warrant no more threads are locked
- *             on the deleted mutex.
- * @param      context:(IN/OUT) Context of the mutex
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_mutexClose(M4OSA_Context context)
-{
-    M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)context;
-
-    M4OSA_TRACE1_1("M4OSA_mutexClose\t\tM4OSA_Context 0x%x", context);
-
-    M4OSA_DEBUG_IF2(M4OSA_NULL == context, M4ERR_PARAMETER,
-                                     "M4OSA_mutexClose: context is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(pMutexContext->coreID != M4OSA_MUTEX,
-                                        M4ERR_BAD_CONTEXT, "M4OSA_mutexUnlock");
-
-    pthread_mutex_destroy(&pMutexContext->mutex);
-
-    free( pMutexContext);
-
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/osal/src/M4OSA_Random.c b/libvideoeditor/osal/src/M4OSA_Random.c
deleted file mode 100755
index c24d039..0000000
--- a/libvideoeditor/osal/src/M4OSA_Random.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file        M4PSW_Trace.c
- * @brief        Trace function for trace macros
- * @note        This file gives the implementation of the trace function used
- *                in the trace instrumentation macros
- ************************************************************************
-*/
-
-#include <stdio.h>  /*for printf */
-#include <stdarg.h> /* ANSI C macros and defs for variable args */
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Mutex.h"
-/**
- ************************************************************************
- * @fn         M4OSA_ERR M4OSA_randInit()
- * @brief      this function initialize the number generator
- *               this function must be called once before any call to M4OSA_rand()
- *               need the stdlib and time libraries
- * @note
- * @param
- * @return     M4NO_ERROR
- ************************************************************************
-*/
-
-M4OSA_ERR M4OSA_randInit()
-{
-    int i;
-
-    srand(time(NULL));
-
-    /* Windows' rand is rotten, the first generated value after the init
-    above is not random enough, so let's shake things a little... */
-
-    for (i=0; i<100; i++) rand();
-
-    return M4NO_ERROR;
-}
-/**
- ************************************************************************
- * @fn           M4OSA_ERR M4OSA_rand(M4OSA_Int32* out_value, M4OSA_UInt32 max_value)
- * @brief       This function gives a random number between 1 and max_value
- *               (inclusive) with approximately equal probability, and
- *               returns this number in out_value. For instance, a max_value
- *             of 6 will simulate a fair 6-sided dice roll.
- * @note
- * @param      out_value (OUT): on return, points to random result
- * @param       max_value (IN): max expected value
- * @return     M4NO_ERROR
- ************************************************************************
-*/
-
-M4OSA_ERR M4OSA_rand(M4OSA_Int32* out_value, M4OSA_UInt32 max_value)
-{
-    if( (out_value == M4OSA_NULL) || (max_value < 1) )
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    (*out_value) = rand();
-    /* notice this algorithm will only work for max_values such that the multiplication
-    won't overflow, which means that max_value typically shouldn't go over the range of
-    an Int16. */
-    (*out_value) = (((*out_value) * max_value) / ((M4OSA_UInt32)RAND_MAX + 1)) + 1;
-
-    return M4NO_ERROR;
-}
-
-
diff --git a/libvideoeditor/osal/src/M4OSA_Semaphore.c b/libvideoeditor/osal/src/M4OSA_Semaphore.c
deleted file mode 100755
index f3b5852..0000000
--- a/libvideoeditor/osal/src/M4OSA_Semaphore.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4OSA_Semaphore.c
- * @brief        Semaphore for Windows
- * @note         This file implements functions to manipulate semaphore
- ************************************************************************
-*/
-
-
-
-#include "M4OSA_Debug.h"
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Semaphore.h"
-
-#include <semaphore.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#include <time.h>
-
-
-/* Context for the semaphore */
-typedef struct {
-   M4OSA_UInt32   coreID;     /* semaphore context identifiant */
-   sem_t          semaphore;  /* semaphore */
-} M4OSA_SemaphoreContext;
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method creates a new semaphore with the "initialCounter"
- *             value.
- * @note       This function creates and allocates a unique context. It's the
- *             OSAL real time responsibility for managing its context. It must
- *             be freed by the M4OSA_semaphoreClose function. The context
- *             parameter will be sent back to any OSAL core semaphore functions
- *             to allow retrieving data associated to the opened semaphore.
- * @param      context:(OUT) Context of the created semaphore
- * @param      initial_count:(IN) Initial counter of the semaphore
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: provided context is NULL
- * @return     M4ERR_ALLOC: there is no more available memory
- * @return     M4ERR_CONTEXT_FAILED: the context creation failed
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_semaphoreOpen(M4OSA_Context* context,
-                              M4OSA_UInt32 initial_count)
-{
-   M4OSA_SemaphoreContext* semaphoreContext = M4OSA_NULL;
-
-   M4OSA_TRACE1_2("M4OSA_semaphoreOpen\t\tM4OSA_Context* 0x%x\tM4OSA_UInt32 "
-                  "%d", context, initial_count);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_semaphoreOpen");
-
-   *context = M4OSA_NULL;
-
-   semaphoreContext = (M4OSA_SemaphoreContext*) M4OSA_32bitAlignedMalloc(
-                      sizeof(M4OSA_SemaphoreContext), M4OSA_SEMAPHORE,
-                      (M4OSA_Char*)"M4OSA_semaphoreOpen: semaphore context");
-
-   if(semaphoreContext == M4OSA_NULL)
-   {
-      M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_semaphoreOpen");
-
-      return M4ERR_ALLOC;
-   }
-
-   if (0 != sem_init(&semaphoreContext->semaphore, 0, initial_count))
-   {
-      free(semaphoreContext);
-
-      M4OSA_DEBUG(M4ERR_CONTEXT_FAILED,
-         "M4OSA_semaphoreOpen: OS semaphore creation failed");
-
-      return M4ERR_CONTEXT_FAILED;
-   }
-
-   semaphoreContext->coreID = M4OSA_SEMAPHORE ;
-   *context = (M4OSA_Context)semaphoreContext;
-
-   return M4NO_ERROR;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method decrements (one by one) the semaphore counter. The
- *             semaphore is identified by its context This call is not blocking
- *             if the semaphore counter is positive or zero (after
- *             decrementation). This call is blocking if the semaphore counter
- *             is less than zero (after decrementation), until the semaphore is
- *             upper than zero (see M4OSA_semaphorePost) or time_out is
- *             reached.
- * @note       If "timeout" value is M4OSA_WAIT_FOREVER, the calling thread
- *             will block indefinitely until the semaphore  is unlocked.
- * @param      context:(IN/OUT) Context of the semaphore
- * @param      timeout:(IN) Time out in milliseconds
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4WAR_TIME_OUT: time out is elapsed before semaphore has been
- *             available.
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_semaphoreWait(M4OSA_Context context, M4OSA_Int32 timeout)
-{
-   M4OSA_SemaphoreContext* semaphoreContext = (M4OSA_SemaphoreContext*)context;
-   struct timespec         ts;
-   struct timespec         left;
-   int                     result;
-
-   M4OSA_TRACE1_2("M4OSA_semaphoreWait\t\tM4OSA_Context 0x%x\tM4OSA_UInt32 %d",
-                  context, timeout);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_semaphoreWait");
-
-   M4OSA_DEBUG_IF2(semaphoreContext->coreID != M4OSA_SEMAPHORE,
-                   M4ERR_BAD_CONTEXT, "M4OSA_semaphoreWait");
-
-   if ( (M4OSA_Int32)M4OSA_WAIT_FOREVER == timeout)
-   {
-       if ( 0 != sem_wait(&semaphoreContext->semaphore) )
-       {
-           M4OSA_DEBUG(M4ERR_BAD_CONTEXT,
-                  "M4OSA_semaphoreWait: OS semaphore wait failed");
-
-           return M4ERR_BAD_CONTEXT ;
-       }
-   }
-   else
-   {
-       result = sem_trywait(&semaphoreContext->semaphore);
-       while ( ((EBUSY == result) || (EAGAIN == result)) && ( 0 < timeout ) )
-       {
-           ts.tv_sec  = 0;
-           if (1 <= timeout)
-           {
-               ts.tv_nsec = 1000000;
-               timeout -= 1;
-           }
-           else
-           {
-               ts.tv_nsec = timeout * 1000000;
-               timeout = 0;
-           }
-           nanosleep(&ts, &left);
-           result = sem_trywait(&semaphoreContext->semaphore);
-       }
-       if (0 != result)
-       {
-           if ((EBUSY == result) || (EAGAIN == result))
-           {
-               return M4WAR_TIME_OUT;
-           }
-           else
-           {
-               M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_semaphoreWait: OS semaphore wait failed");
-               return M4ERR_BAD_CONTEXT;
-           }
-       }
-   }
-
-   return M4NO_ERROR;
-}
-
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method increments the semaphore counter. The semaphore is
- *             identified by its context
- * @note       If the semaphore counter is upper than zero (after addition),
- *             the M4OSA_semaphoreWait call of the thread with the highest
- *             priority is unblocked and made ready to run.
- * @note       No hypotheses can be made on which thread will be unblocked
- *             between threads with the same priority.
- * @param      context:(IN/OUT) Context of the semaphore
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
-************************************************************************
-*/
-M4OSA_ERR M4OSA_semaphorePost(M4OSA_Context context)
-{
-   M4OSA_SemaphoreContext* semaphoreContext = (M4OSA_SemaphoreContext*)context;
-
-   M4OSA_TRACE1_1("M4OSA_semaphorePost\t\tM4OSA_Context 0x%x", context);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_semaphorePost");
-
-   M4OSA_DEBUG_IF2(semaphoreContext->coreID != M4OSA_SEMAPHORE,
-                   M4ERR_BAD_CONTEXT, "M4OSA_semaphorePost");
-
-   sem_post(&semaphoreContext->semaphore);
-
-   return M4NO_ERROR;
-}
-
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method deletes a semaphore (identify by its context).
- *             After this call the semaphore and its context is no more
- *             useable. This function frees all the memory related to this
- *             semaphore.
- * @note       It is an application issue to warrant no more threads are locked
- *             on the deleted semaphore.
- * @param      context:(IN/OUT) Context of the semaphore
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one.
-************************************************************************
-*/
-M4OSA_ERR M4OSA_semaphoreClose(M4OSA_Context context)
-{
-   M4OSA_SemaphoreContext* semaphoreContext = (M4OSA_SemaphoreContext*)context;
-
-   M4OSA_TRACE1_1("M4OSA_semaphoreClose\t\tM4OSA_Context 0x%x", context);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_semaphoreClose");
-
-   M4OSA_DEBUG_IF2(semaphoreContext->coreID != M4OSA_SEMAPHORE,
-                   M4ERR_BAD_CONTEXT, "M4OSA_semaphoreClose");
-
-   sem_destroy(&semaphoreContext->semaphore);
-
-   free(semaphoreContext);
-
-   return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/osal/src/M4OSA_Thread.c b/libvideoeditor/osal/src/M4OSA_Thread.c
deleted file mode 100755
index 3e82fb3..0000000
--- a/libvideoeditor/osal/src/M4OSA_Thread.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ************************************************************************
- * @file         M4OSA_Thread.c
- * @ingroup      OSAL
- * @brief        Implements and manipulate threads
- * @note         This file implements functions to manipulate threads
- ************************************************************************
-*/
-
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <errno.h>
-
-#include <utils/threads.h>
-#include "M4OSA_Debug.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Thread.h"
-#include "M4OSA_Thread_priv.h"
-#include "M4OSA_Mutex.h"
-#include "M4OSA_Semaphore.h"
-#include "M4OSA_CharStar.h"
-
-
-void* M4OSA_threadSyncForEverDo(void *context)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-   M4OSA_Bool auto_kill = M4OSA_FALSE;
-
-    /*
-       M4OSA_Void* userData;
-    */
-
-   M4OSA_TRACE2_1("M4OSA_threadSyncForEverDo\t\tLPVOID 0x%x", context);
-
-    /*
-       userData = threadContext->userData;
-    */
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-
-   threadContext->state = M4OSA_kThreadRunning;
-
-   M4OSA_semaphorePost(threadContext->semStartStop);
-
-   while(threadContext->state == M4OSA_kThreadRunning)
-   {
-      M4OSA_mutexUnlock(threadContext->stateMutex);
-
-      if((threadContext->func(threadContext->param)) != M4NO_ERROR)
-      {
-         M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-         if(threadContext->state == M4OSA_kThreadRunning)
-         {
-
-            //PR 2354 - ACO : Suppress stopping state and don't
-            //         unlock mutex before closing the thread
-            threadContext->state = M4OSA_kThreadOpened;
-            M4OSA_mutexUnlock(threadContext->stateMutex);
-            return 0;
-         }
-
-         M4OSA_mutexUnlock(threadContext->stateMutex);
-      }
-
-      M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-   }
-
-
-   M4OSA_semaphorePost(threadContext->semStartStop);
-
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-
-
-   return 0;
-}
-
-
-
-
-
-/**
- ************************************************************************
-  * @brief      This method creates a new thread. After this call the thread is
- *             identified by its "context". The thread function is provided by
- *             the "func" parameter. This function creates & allocates a unique
- *             context. It's the OSAL real time responsibility for managing its
- *             context. It must be freed by the M4OSA_threadSyncClose function.
- *             The context parameter will be sent back to any OSAL core thread
- *             functions to allow retrieving data associated to the opened
- *             thread.
- * @note       This function creates the thread, but the thread is not running.
- * @note       Once the thread is created, the state is M4OSA_kThreadOpened.
- * @param      context:(OUT) Context of the created thread
- * @param      func:(IN) "doIt" function pointer to run
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_ALLOC: there is no more available memory
- * @return     M4ERR_CONTEXT_FAILED: the context creation failed
-  ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncOpen(M4OSA_Context* context,
-                               M4OSA_ThreadDoIt func)
-{
-   M4OSA_ThreadContext* threadContext = M4OSA_NULL;
-   M4OSA_ERR err_code;
-
-   M4OSA_TRACE1_2("M4OSA_threadSyncOpen\t\tM4OSA_Context* 0x%x\t"
-                  "M4OSA_ThreadDoIt 0x%x", context, func);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncOpen");
-
-   M4OSA_DEBUG_IF2(func == M4OSA_NULL,
-                    M4ERR_PARAMETER, "M4OSA_threadSyncOpen");
-
-   *context = M4OSA_NULL;
-
-   threadContext =
-      (M4OSA_ThreadContext*)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_ThreadContext),
-      M4OSA_THREAD, (M4OSA_Char*)"M4OSA_threadSyncOpen: thread context");
-
-   if(threadContext == M4OSA_NULL)
-   {
-      M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_threadSyncOpen");
-
-      return M4ERR_ALLOC;
-   }
-
-   threadContext->func = func;
-   threadContext->stackSize = 64 * 1024;
-   threadContext->name = M4OSA_NULL;
-   threadContext->threadID = 0;
-   threadContext->coreID = M4OSA_THREAD;
-   threadContext->state = M4OSA_kThreadOpened;
-   threadContext->priority = M4OSA_kThreadNormalPriority ;
-
-   err_code = M4OSA_mutexOpen(&(threadContext->stateMutex));
-
-   if(M4OSA_ERR_IS_ERROR(err_code))
-   {
-      M4OSA_DEBUG(err_code, "M4OSA_threadSyncOpen: M4OSA_mutexOpen");
-
-      return err_code;
-   }
-
-   err_code = M4OSA_semaphoreOpen(&(threadContext->semStartStop), 0);
-
-   if(M4OSA_ERR_IS_ERROR(err_code))
-   {
-      M4OSA_DEBUG(err_code, "M4OSA_threadSyncOpen: M4OSA_semaphoreOpen");
-
-      return err_code;
-   }
-
-   *context = threadContext;
-
-   return M4NO_ERROR;
-}
-
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method runs a specified thread. The "param" parameter
- *             allows the application to set a specific parameter to the
- *             created thread. This parameter will be used as the second one of
- *             the "M4OSA_ThreadDoIt" function.
- * @note       This method is a blocking up to the thread is running.
- *             Before calling this method, the state is M4OSA_kThreadOpened.
- *             Once the method is called, the state is M4OSA_kThreadStarting.
- *             Once the thread is running, the state is M4OSA_kThreadRunning.
- * @note       This method returns immediately. If the "threadStarted" optionID
- *             is not NULL, the thread will call it before running the doIt
- *             function.
- * @param      context:(IN/OUT) Context of the thread
- * @param      param:(IN) Application data thread parameter
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_STATE: this function cannot be called now
- * @return     M4ERR_THREAD_NOT_STARTED: the thread did not start
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncStart(M4OSA_Context context,
-                                M4OSA_Void* param)
-{
-   M4OSA_ThreadContext* threadContext =  (M4OSA_ThreadContext*)context;
-   pthread_attr_t     attribute = { 0, 0, 0, 0, 0, 0 };
-   int                min       = 0;
-   int                max       = 0;
-   int                priority  = 0;
-   struct sched_param sched     = { 0 };
-
-   M4OSA_TRACE1_2("M4OSA_threadSyncStart\t\tM4OSA_Context 0x%x\tM4OSA_Void* "
-                  "0x%x", context, param);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncStart");
-
-   M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncStart");
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-   if(threadContext->state != M4OSA_kThreadOpened)
-   {
-      M4OSA_mutexUnlock(threadContext->stateMutex);
-
-      M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncStart");
-
-      return M4ERR_STATE;
-   }
-
-   threadContext->state = M4OSA_kThreadStarting;
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-   threadContext->param = param;
-
-   if ( 0 == pthread_attr_init( &attribute ) )
-   {
-      if ( 0 == pthread_attr_setdetachstate( &attribute, PTHREAD_CREATE_DETACHED ) )
-      {
-         if ( 0 == pthread_attr_setstacksize( &attribute, (size_t)threadContext->stackSize ) )
-         {
-            if ( 0 == pthread_attr_setschedpolicy( &attribute, SCHED_OTHER ) )
-            {
-                /* Tentative patches to handle priorities in a better way : */
-                /* Use Android's predefined priorities (range +19..-20)
-                 *rather than Linux ones (0..99)*/
-
-                /* Get min and max priorities */
-                min = sched_get_priority_min( SCHED_FIFO );
-                max = sched_get_priority_max( SCHED_FIFO );
-
-                M4OSA_TRACE1_2("M4OSA_threadSyncStart MAX=%d MIN=%d", max, min);
-
-                /* tentative modification of the priorities */
-                /* Set the priority based on default android priorities */
-                /* This probably requires some more tuning,
-                 * outcome of this priority settings are not yet satisfactory */
-                /* Implementing thread handling based on Android's thread creation
-                 * helpers might bring some improvement (see threads.h) */
-                switch(threadContext->priority)
-                {
-                case M4OSA_kThreadLowestPriority:
-                    priority = ANDROID_PRIORITY_NORMAL;
-                    break;
-                case M4OSA_kThreadLowPriority:
-                    priority = ANDROID_PRIORITY_DISPLAY;
-                    break;
-                case M4OSA_kThreadNormalPriority:
-                    priority = ANDROID_PRIORITY_URGENT_DISPLAY;
-                    break;
-                case M4OSA_kThreadHighPriority:
-                    priority = ANDROID_PRIORITY_AUDIO;
-                    break;
-                case M4OSA_kThreadHighestPriority:
-                    priority = ANDROID_PRIORITY_URGENT_AUDIO;
-                    break;
-                }
-                sched.sched_priority = priority;
-
-                if ( 0 == pthread_attr_setschedparam( &attribute, &sched ) )
-                {
-                    if ( 0 == pthread_create( &threadContext->threadID,
-                                              &attribute,
-                                              &M4OSA_threadSyncForEverDo,
-                                              (void *)threadContext ) )
-                    {
-                        if ( M4OSA_FALSE == M4OSA_ERR_IS_ERROR( M4OSA_semaphoreWait(
-                                                                    threadContext->semStartStop,
-                                                                    M4OSA_WAIT_FOREVER ) ) )
-                        {
-                            return M4NO_ERROR;
-                        }
-                    }
-                }
-            }
-         }
-      }
-      pthread_attr_destroy( &attribute );
-   }
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-   threadContext->state = M4OSA_kThreadOpened;
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-
-   M4OSA_DEBUG(M4ERR_THREAD_NOT_STARTED, "M4OSA_threadSyncStart");
-
-   return M4ERR_THREAD_NOT_STARTED;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method stops a specified thread.
- * @note       This call is a blocking one up to the "M4OSA_ThreadDoIt"
- *             function has returned.
- *             Before the method is called, the state is M4OSA_kThreadRunning.
- *             Once the method is called, the state is M4OSA_kThreadStopping.
- *             Once the thread is stopped, the state is M4OSA_kThreadOpened.
- * @note       This method returns once the thread has been stopped. If the
- *             "threadStopped" optionID is not NULL, the thread will call it
- *             before dying.
- * @param      context:(IN/OUT) Context of the thread
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_STATE: this function cannot be called now
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncStop(M4OSA_Context context)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-
-   M4OSA_TRACE1_1("M4OSA_threadSyncStop\t\tM4OSA_Context 0x%x", context);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncStop");
-
-   M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncStop");
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-   if(threadContext->state != M4OSA_kThreadRunning)
-   {
-      M4OSA_mutexUnlock(threadContext->stateMutex);
-
-      M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncStop");
-
-      return M4ERR_STATE;
-   }
-
-   threadContext->state = M4OSA_kThreadStopping;
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-
-   M4OSA_semaphoreWait(threadContext->semStartStop, M4OSA_WAIT_FOREVER);
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-   threadContext->state = M4OSA_kThreadOpened;
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-
-   return M4NO_ERROR;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method deletes a thread (identified by its context). After
- *             this call the thread and its context are no more useable. This
- *             function frees all the memory related to this thread.
- * @note       Before the method is called, the state is M4OSA_kThreadOpened.
- *             Once the method is called, the state is M4OSA_kThreadClosed.
- * @param      context:(IN/OUT) Context of the thread
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_STATE: this function cannot be called now
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncClose(M4OSA_Context context)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-   M4OSA_ERR err_code;
-
-   M4OSA_TRACE1_1("M4OSA_threadSyncClose\t\tM4OSA_Context 0x%x", context);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncClose");
-
-   M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncClose");
-
-   M4OSA_DEBUG_IF2(threadContext->state == M4OSA_kThreadClosed,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncClose");
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-   if(threadContext->state != M4OSA_kThreadOpened)
-   {
-      M4OSA_mutexUnlock(threadContext->stateMutex);
-
-      M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncClose");
-
-      return M4ERR_STATE;
-   }
-
-   threadContext->state = M4OSA_kThreadClosed;
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-
-   err_code = M4OSA_mutexClose(threadContext->stateMutex);
-
-   if(M4OSA_ERR_IS_ERROR(err_code))
-   {
-      M4OSA_DEBUG(err_code, "M4OSA_threadSyncClose: M4OSA_mutexClose");
-
-      return err_code;
-   }
-
-   err_code = M4OSA_semaphoreClose(threadContext->semStartStop);
-
-   if(M4OSA_ERR_IS_ERROR(err_code))
-   {
-      M4OSA_DEBUG(err_code, "M4OSA_threadSyncClose: M4OSA_semaphoreClose");
-
-      return err_code;
-   }
-
-   if(threadContext->name != M4OSA_NULL)
-   {
-      free(threadContext->name);
-   }
-
-   free(threadContext);
-
-   return M4NO_ERROR;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method asks the thread to return its state.
- * @note       The caller is responsible for allocating/deallocating the state
- *             field.
- * @param      context:(IN) Context of the thread
- * @param      state:(OUT) Thread state
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncGetState(M4OSA_Context context,
-                                   M4OSA_ThreadState* state)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-
-   M4OSA_TRACE1_2("M4OSA_threadSyncGetState\t\tM4OSA_Context 0x%x\t"
-                  "M4OSA_ThreadState* 0x%x", context, state);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncGetState");
-
-   M4OSA_DEBUG_IF2(state == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncGetState");
-
-   M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncGetState");
-
-   *state = threadContext->state;
-
-   return M4NO_ERROR;
-}
-
-
-
-
-/**
- ************************************************************************
- * @brief      This method asks the calling thread to sleep during "timeSleep"
- *             milliseconds.
- * @note       This function does not have any context.
- * @param      time:(IN) Time to sleep in milliseconds
- * @return     M4NO_ERROR: there is no error
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSleep(M4OSA_UInt32 time)
-{
-   struct timespec rqtp = { 0, 0 };
-   struct timespec rmtp = { 0, 0 };
-
-   M4OSA_TRACE1_1("M4OSA_threadSleep\t\tM4OSA_UInt32 %d", time);
-
-   rqtp.tv_sec = (time_t)time/1000;
-   rqtp.tv_nsec = (time%1000) * 1000000;
-   nanosleep(&rqtp, &rmtp);
-
-   return M4NO_ERROR;
-}
-
-#if(M4OSA_OPTIONID_THREAD_PRIORITY == M4OSA_TRUE)
-
-M4OSA_ERR M4OSA_SetThreadSyncPriority(M4OSA_Context context,
-                                  M4OSA_DataOption optionValue)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-   M4OSA_ThreadPriorityLevel priority
-                                 = (M4OSA_ThreadPriorityLevel)(optionValue);
-
-   M4OSA_TRACE2_2("M4OSA_SetThreadSyncPriority\t\tM4OSA_Context 0x%x\t"
-                  "M4OSA_DataOption 0x%x", context, optionValue);
-
-   if((M4OSA_UInt32)(uintptr_t)optionValue>M4OSA_kThreadLowestPriority)
-   {
-      return M4ERR_PARAMETER;
-   }
-
-   threadContext->priority = priority;
-
-   return M4NO_ERROR;
-}
-
-#endif /*M4OSA_OPTIONID_THREAD_PRIORITY*/
-
-
-
-
-#if(M4OSA_OPTIONID_THREAD_NAME == M4OSA_TRUE)
-
-M4OSA_ERR M4OSA_SetThreadSyncName(M4OSA_Context context,
-                              M4OSA_DataOption optionValue)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-   M4OSA_Char* name = (M4OSA_Char*)optionValue;
-   M4OSA_UInt32 nameSize ;
-
-   M4OSA_TRACE2_2("M4OSA_SetThreadSyncName\t\tM4OSA_Context 0x%x\t"
-                  "M4OSA_DataOption 0x%x", context, optionValue);
-
-   if(threadContext->name != NULL)
-   {
-      free(threadContext->name);
-      threadContext->name = M4OSA_NULL;
-   }
-
-   if(optionValue != M4OSA_NULL)
-   {
-      nameSize = strlen((const char *)name)+1;
-
-      threadContext->name =
-         (M4OSA_Char*)M4OSA_32bitAlignedMalloc(nameSize, M4OSA_THREAD,
-         (M4OSA_Char*)"M4OSA_SetThreadSyncName: thread name");
-
-      if(threadContext == M4OSA_NULL)
-      {
-         return M4ERR_ALLOC;
-      }
-
-      memcpy((void *)threadContext->name, (void *)name,
-                   nameSize);
-   }
-
-   return M4NO_ERROR;
-}
-
-#endif /*M4OSA_OPTIONID_THREAD_NAME*/
-
-
-#if(M4OSA_OPTIONID_THREAD_STACK_SIZE == M4OSA_TRUE)
-
-M4OSA_ERR M4OSA_SetThreadSyncStackSize(M4OSA_Context context,
-                                   M4OSA_DataOption optionValue)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-
-   M4OSA_TRACE2_2("M4OSA_SetThreadSyncStackSize\t\tM4OSA_Context 0x%x\t"
-                  "M4OSA_DataOption 0x%x", context, optionValue);
-
-   threadContext->stackSize = (M4OSA_UInt32)(uintptr_t)optionValue;
-
-   return M4NO_ERROR;
-}
-
-#endif /*M4OSA_OPTIONID_THREAD_STACK_SIZE*/
-
-/**
- ************************************************************************
- * @brief      This method asks the core OSAL-Thread component to set the value
- *             associated with the optionID. The caller is responsible for
- *             allocating/deallocating the memory of the value field.
- * @note       As the caller is responsible of allocating/de-allocating the
- *             "value" field, the callee must copy this field to its internal
- *             variable.
- * @param      context:(IN/OUT) Context of the thread
- * @param      optionID:(IN) ID of the option
- * @param      optionValue:(IN) Value of the option
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_BAD_OPTION_ID: the optionID is not a valid one
- * @return     M4ERR_STATE: this option is not available now
- * @return     M4ERR_READ_ONLY: this option is a read only one
- * @return     M4ERR_NOT_IMPLEMENTED: this option is not implemented
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncSetOption(M4OSA_Context context,
-                                    M4OSA_ThreadOptionID optionID,
-                                    M4OSA_DataOption optionValue)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-   M4OSA_ERR err_code;
-
-   M4OSA_TRACE1_3("M4OSA_threadSyncSetOption\t\tM4OSA_Context 0x%x\t"
-                  "M4OSA_OptionID %d\tM4OSA_DataOption 0x%x",
-                  context, optionID, optionValue);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncSetOption");
-
-   M4OSA_DEBUG_IF2(optionID == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncSetOption");
-
-   M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncSetOption");
-
-   M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_THREAD),
-                   M4ERR_BAD_OPTION_ID, "M4OSA_threadSyncSetOption");
-
-   M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_WRITABLE(optionID),
-                   M4ERR_READ_ONLY, "M4OSA_threadSyncSetOption");
-
-
-   M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
-
-   if(threadContext->state != M4OSA_kThreadOpened)
-   {
-      M4OSA_mutexUnlock(threadContext->stateMutex);
-
-      M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncSetOption");
-
-      return M4ERR_STATE;
-   }
-
-   switch(optionID)
-   {
-
-#if(M4OSA_OPTIONID_THREAD_PRIORITY == M4OSA_TRUE)
-      case M4OSA_ThreadPriority:
-      {
-         err_code = M4OSA_SetThreadSyncPriority(context, optionValue);
-
-         break;
-      }
-#endif /*M4OSA_OPTIONID_THREAD_PRIORITY*/
-
-#if(M4OSA_OPTIONID_THREAD_NAME == M4OSA_TRUE)
-      case M4OSA_ThreadName:
-      {
-         err_code = M4OSA_SetThreadSyncName(context, optionValue);
-
-         break;
-      }
-#endif /*M4OSA_OPTIONID_THREAD_NAME*/
-
-#if(M4OSA_OPTIONID_THREAD_STACK_SIZE == M4OSA_TRUE)
-      case M4OSA_ThreadStackSize:
-      {
-         err_code = M4OSA_SetThreadSyncStackSize(context, optionValue);
-
-         break;
-      }
-#endif /*M4OSA_OPTIONID_THREAD_STACK_SIZE*/
-
-      default:
-      {
-         M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_threadSyncSetOption");
-
-         err_code = M4ERR_NOT_IMPLEMENTED;
-      }
-   }
-
-   M4OSA_mutexUnlock(threadContext->stateMutex);
-
-   return err_code;
-}
-
-
-
-/**
- ************************************************************************
- * @brief      This method asks the OSAL-Thread to return the value associated
- *             with the optionID. The caller is responsible for
- *             allocating/deallocating the memory of the value field.
- * @note       "optionValue" must be cast according to the type related to the
- *             optionID.
- * @note       As the caller is responsible for de-allocating the "value"
- *             field, the core OSAL-Thread component must perform a copy of its
- *             internal value to the value field.
- * @param      context:(IN) Context of the thread
- * @param      optionID:(IN) ID of the option
- * @param      optionValue:(OUT) Value of the option
- * @return     M4NO_ERROR: there is no error
- * @return     M4ERR_PARAMETER: at least one parameter is NULL
- * @return     M4ERR_BAD_CONTEXT: provided context is not a valid one
- * @return     M4ERR_BAD_OPTION_ID: the optionID is not a valid one
- * @return     M4ERR_WRITE_ONLY: this option is a write only one
- * @return     M4ERR_NOT_IMPLEMENTED: this option is not implemented
- ************************************************************************
-*/
-M4OSA_ERR M4OSA_threadSyncGetOption(M4OSA_Context context,
-                                    M4OSA_ThreadOptionID optionID,
-                                    M4OSA_DataOption* optionValue)
-{
-   M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
-
-   M4OSA_TRACE1_3("M4OSA_threadSyncGetOption\t\tM4OSA_Context 0x%x\t"
-                  "M4OSA_OptionID %d\tM4OSA_DataOption* 0x%x",
-                  context, optionID, optionValue);
-
-   M4OSA_DEBUG_IF2(context == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncGetOption");
-
-   M4OSA_DEBUG_IF2(optionID == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncGetOption");
-
-   M4OSA_DEBUG_IF2(optionValue == M4OSA_NULL,
-                   M4ERR_PARAMETER, "M4OSA_threadSyncGetOption");
-
-   M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
-                   M4ERR_BAD_CONTEXT, "M4OSA_threadSyncGetOption");
-
-   M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_THREAD),
-                   M4ERR_BAD_OPTION_ID, "M4OSA_threadSyncGetOption");
-
-   M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_READABLE(optionID),
-                   M4ERR_WRITE_ONLY, "M4OSA_threadSyncGetOption");
-
-   switch(optionID)
-   {
-
-#if(M4OSA_OPTIONID_THREAD_PRIORITY == M4OSA_TRUE)
-      case M4OSA_ThreadPriority:
-      {
-         M4OSA_ThreadPriorityLevel* priority =
-                                    (M4OSA_ThreadPriorityLevel*)optionValue;
-
-         *priority = threadContext->priority;
-
-         return M4NO_ERROR;
-      }
-#endif /*M4OSA_OPTIONID_THREAD_PRIORITY*/
-
-#if(M4OSA_OPTIONID_THREAD_NAME == M4OSA_TRUE)
-      case M4OSA_ThreadName:
-      {
-         M4OSA_Char** name = (M4OSA_Char**)optionValue;
-
-         *name = threadContext->name;
-
-         return M4NO_ERROR;
-      }
-#endif /*M4OSA_OPTIONID_THREAD_NAME*/
-
-#if(M4OSA_OPTIONID_THREAD_STACK_SIZE == M4OSA_TRUE)
-      case M4OSA_ThreadStackSize:
-      {
-         M4OSA_UInt32* stackSize = (M4OSA_UInt32*)optionValue;
-
-         *stackSize = threadContext->stackSize;
-
-         return M4NO_ERROR;
-      }
-#endif /*M4OSA_OPTIONID_THREAD_STACK_SIZE*/
-
-      default:
-        break;
-   }
-
-   M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_threadSyncGetOption");
-
-   return M4ERR_NOT_IMPLEMENTED;
-}
-
diff --git a/libvideoeditor/osal/src/M4PSW_DebugTrace.c b/libvideoeditor/osal/src/M4PSW_DebugTrace.c
deleted file mode 100755
index 850ed91..0000000
--- a/libvideoeditor/osal/src/M4PSW_DebugTrace.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file       M4PSW_DebugTrace.c
- * @brief      Default trace function for debugging macros
- * @note       This file gives the default implementation of the trace function
- *             used in the debug instrumentation macros, based on printf.
- *             Application writers are strongly encouraged to implement their
- *             own "M4OSA_DebugTrace".
- ************************************************************************
-*/
-
-
-#include <inttypes.h>
-#include <stdio.h> /*for printf */
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/*#define NO_FILE */ /* suppresses the file name print out */
-
-
-/**
- ************************************************************************
- * void M4OSA_DebugTrace(M4OSA_Int32 line, char* file, M4OSA_Int32 level,
- *                       M4OSA_Char* cond, char* msg, M4OSA_ERR err)
- * @brief    This function implements the trace for debug tests
- * @note    This function is to be called in the debug macros only.
- *            This implementation uses printf.
- * @param    line (IN): the line number in the source file
- * @param    file (IN): the source file name
- * @param    level (IN): the debug level
- * @param    msg (IN): the error message
- * @param    err (IN): the return value (error code)
- * @return    none
- ************************************************************************
-*/
-
-M4OSAL_TRACE_EXPORT_TYPE void M4OSA_DebugTrace(M4OSA_Int32 line,
-                                               M4OSA_Char* file,
-                                               M4OSA_Int32 level,
-                                               M4OSA_Char* cond,
-                                               M4OSA_Char* msg,
-                                               M4OSA_ERR err)
-{
-    M4OSA_Int32 i;
-
-    /* try to "indent" the resulting traces depending on the level */
-    for (i =0 ; i < level; i ++)
-    {
-        printf(" ");
-    }
-
-#ifdef NO_FILE
-    printf("Error: %" PRIu32 ", on %s: %s\n",err,cond,msg);
-#else /* NO_FILE     */
-    printf("Error: %" PRIu32 ", on %s: %s Line %" PRIu32 " in: %s\n",err,cond,msg,line,file);
-#endif /* NO_FILE     */
-
-}
-
-M4OSAL_TRACE_EXPORT_TYPE M4OSA_Void M4OSA_DEBUG_traceFunction(M4OSA_UInt32 line,
-                                                              M4OSA_Char* fileName,
-                                                              M4OSA_UInt32 level,
-                                                              M4OSA_Char* stringCondition,
-                                                              M4OSA_Char* message,
-                                                              M4OSA_ERR returnedError)
-{
-    M4OSA_DebugTrace(line, fileName, level, stringCondition, message, returnedError);
-}
-
diff --git a/libvideoeditor/osal/src/M4PSW_MemoryInterface.c b/libvideoeditor/osal/src/M4PSW_MemoryInterface.c
deleted file mode 100755
index ea4ccea..0000000
--- a/libvideoeditor/osal/src/M4PSW_MemoryInterface.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file   M4PSW_MemoryInterface.c
- * @brief  Memory Interface
- * @note   Implementation of the osal memory functions
- *************************************************************************
-*/
-
-#include <stdlib.h>
-#include <memory.h>
-
-#include <time.h>
-#include "M4OSA_Memory.h"
-#ifndef M4VPS_ADVANCED_MEMORY_MANAGER
-/**
- ************************************************************************
- * @fn         M4OSA_MemAddr32 M4OSA_32bitAlignedMalloc(M4OSA_UInt32 size,
- *                                          M4OSA_CoreID coreID,
- *                                          M4OSA_Char* string)
- * @brief      this function allocates a memory block (at least 32 bits aligned)
- * @note
- * @param      size (IN): size of allocated block in bytes
- * @param      coreID (IN): identification of the caller component
- * @param      string (IN): description of the allocated block (null terminated)
- * @return     address of the allocated block, M4OSA_NULL if no memory available
- ************************************************************************
-*/
-
-M4OSA_MemAddr32 M4OSA_32bitAlignedMalloc(M4OSA_UInt32 size,
-                             M4OSA_CoreID coreID,
-                             M4OSA_Char* string)
-{
-    M4OSA_MemAddr32 Address = M4OSA_NULL;
-
-    /**
-     * If size is 0, malloc on WIN OS allocates a zero-length item in
-     * the heap and returns a valid pointer to that item.
-     * On other platforms, malloc could returns an invalid pointer
-     * So, DON'T allocate memory of 0 byte */
-    if (size == 0)
-    {
-        return Address;
-    }
-
-    if (size%4 != 0)
-    {
-        size = size + 4 - (size%4);
-    }
-
-    Address = (M4OSA_MemAddr32) malloc(size);
-
-    return Address;
-}
-
-#endif
-
diff --git a/libvideoeditor/osal/src/M4PSW_Trace.c b/libvideoeditor/osal/src/M4PSW_Trace.c
deleted file mode 100755
index f3d9a1f..0000000
--- a/libvideoeditor/osal/src/M4PSW_Trace.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file        M4PSW_Trace.c
- * @brief        Trace function for trace macros
- * @note        This file gives the implementation of the trace function used
- *                in the trace instrumentation macros
- ************************************************************************
-*/
-
-
-#include <stdio.h> /*for printf */
-#include <stdarg.h> /* ANSI C macros and defs for variable args */
-#include "utils/Log.h"
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Debug.h"
-
-#define NO_FILE /* suppresses the file name print out */
-
-#define MAX_STRING_SIZE 1024
-
-/**
- ************************************************************************
- * void M4OSA_Trace(M4OSA_Int32 line, M4OSA_Char* file ,M4OSA_Int32 level,
- *                                                      M4OSA_Char* format, ...)
- * @brief    This function implements the trace for debug tests
- * @note    This implementation uses printf. First the variables are retrieved using
- *            ANSI C defs and macros which enable to access a variable number of arguments.
- *            Then the printf is done (with some ornemental adds).
- * @param    level (IN): the debug level
- * @param    format (IN): the "printf" formated string
- * @param    ... (IN): as many parameters as required ...
- * @return    none
- ************************************************************************
-*/
-
-M4OSAL_TRACE_EXPORT_TYPE void M4OSA_Trace(M4OSA_Int32 line, M4OSA_Char* file ,
-                                     M4OSA_Int32 level, M4OSA_Char* format, ...)
-{
-    M4OSA_Char message[MAX_STRING_SIZE];
-    M4OSA_Int32 i;
-    va_list marker; /* pointer to list of arguments */
-
-    /* get the var arguments into the string message to be able to print */
-    va_start(marker,format); /* set ptr to first argument in the list of arguments passed to the function */
-    vsprintf((char *)message, (const char *)format,marker );  /* formats and writes the data into message */
-    va_end(marker); /* reset pointer to NULL */
-
-    /* do the actual print */
-#ifdef NO_FILE
-    __android_log_print(ANDROID_LOG_INFO, "M4OSA_Trace", "%s", (char*)message);
-#else /* NO_FILE     */
-    __android_log_print(ANDROID_LOG_INFO, "M4OSA_Trace", "%s", "%s at %lu in %s",
-                                                   (char *)message, line, file);
-#endif /* NO_FILE     */
-
-}
-
-M4OSAL_TRACE_EXPORT_TYPE M4OSA_Void M4OSA_TRACE_traceFunction(M4OSA_UInt32 line,
-                                                              M4OSA_Char* fileName,
-                                                              M4OSA_CoreID coreID,
-                                                              M4OSA_UInt32 level,
-                                                              M4OSA_Char* stringMsg, ...)
-{
-    M4OSA_Char message[MAX_STRING_SIZE];
-    M4OSA_Int32 i;
-    va_list marker; /* pointer to list of arguments */
-
-    /* get the var arguments into the string message to be able to print */
-    va_start(marker,stringMsg); /* set ptr to first argument in the list of arguments passed to the function */
-    vsprintf((char *)message, (const char *)stringMsg,marker );  /* formats and writes the data into message */
-    va_end(marker); /* reset pointer to NULL */
-
-    /* do the actual print */
-#ifdef NO_FILE
-    __android_log_print(ANDROID_LOG_INFO, "M4OSA_TRACE_traceFunction", "%s", (char*)message);
-#else /* NO_FILE     */
-    __android_log_print(ANDROID_LOG_INFO, "M4OSA_TRACE_traceFunction", "%s", "%s at %lu in %s",
-                                            (char *)message, line, (char*)file);
-#endif /* NO_FILE     */
-
-}
-
diff --git a/libvideoeditor/osal/src/MODULE_LICENSE_APACHE2 b/libvideoeditor/osal/src/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/osal/src/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/osal/src/NOTICE b/libvideoeditor/osal/src/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/osal/src/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/libvideoeditor/vss/3gpwriter/Android.mk b/libvideoeditor/vss/3gpwriter/Android.mk
deleted file mode 100755
index 5053e7d..0000000
--- a/libvideoeditor/vss/3gpwriter/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h
deleted file mode 100755
index 5f9d16b..0000000
--- a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4MP4W_Types.h
- * @brief   Definition of types for the core MP4 writer
- ******************************************************************************
- */
-
-#ifndef M4MP4W_TYPES_H
-#define M4MP4W_TYPES_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "NXPSW_CompilerSwitches.h"
-
-#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
-
-/* includes */
-#include "M4OSA_Types.h"
-#include "M4OSA_FileWriter.h"
-#include "M4OSA_FileReader.h"
-#include "M4SYS_Stream.h"
-
-/**
- ******************************************************************************
- * structure    M4MP4C_FtypBox
- * @brief       Information to build the 'ftyp' atom
- ******************************************************************************
- */
-#define M4MPAC_FTYP_TAG 0x66747970 /* 'ftyp' */
-#define M4MPAC_MAX_COMPATIBLE_BRANDS 10
-typedef struct
-{
-    /* All brand fields are actually char[4] stored in big-endian integer format */
-
-    M4OSA_UInt32    major_brand;         /* generally '3gp4'            */
-    M4OSA_UInt32    minor_version;       /* generally '0000' or 'x.x '  */
-    M4OSA_UInt32    nbCompatibleBrands;  /* number of compatible brands */
-    M4OSA_UInt32    compatible_brands[M4MPAC_MAX_COMPATIBLE_BRANDS];   /* array of max compatible
-                                                                       brands */
-} M4MP4C_FtypBox;
-
-
-/**
- ******************************************************************************
- * structure    M4MP4W_memAddr
- * @brief        Buffer structure for the MP4 writer
- ******************************************************************************
- */
-typedef struct
-{
-    M4OSA_UInt32        size;
-    M4OSA_MemAddr32    addr;
-} M4MP4W_memAddr;
-
-/**
- ******************************************************************************
- * Time type for the core MP4 writer
- ******************************************************************************
- */
-typedef M4OSA_UInt32 M4MP4W_Time32;
-
-/**
- ******************************************************************************
- * enumeration   M4MP4W_State
- * @brief        This enum defines the core MP4 writer states
- * @note         These states are used internaly, but can be retrieved from outside
- *               the writer.
- ******************************************************************************
- */
-typedef enum
-{
-    M4MP4W_opened            = 0x100,
-    M4MP4W_ready             = 0x200,
-    M4MP4W_writing           = 0x300,
-    M4MP4W_writing_startAU   = 0x301,
-    M4MP4W_closed            = 0x400
-} M4MP4W_State;
-
-/**
- ******************************************************************************
- * enumeration    M4MP4W_OptionID
- * @brief        This enum defines the core MP4 writer options
- * @note        These options give parameters for the core MP4 writer
- ******************************************************************************
- */
-typedef enum
-{
-    M4MP4W_maxAUperChunk        = 0xC101,
-    M4MP4W_maxChunkSize         = 0xC102,
-    M4MP4W_maxChunkInter        = 0xC103,
-    M4MP4W_preWriteCallBack     = 0xC104,
-    M4MP4W_postWriteCallBack    = 0xC105,
-    M4MP4W_maxAUsize            = 0xC106,
-    M4MP4W_IOD                  = 0xC111,
-    M4MP4W_ESD                  = 0xC112,
-    M4MP4W_SDP                  = 0xC113,
-    M4MP4W_trackSize            = 0xC114,
-    M4MP4W_MOOVfirst            = 0xC121,
-    M4MP4W_V2_MOOF              = 0xC131,
-    M4MP4W_V2_tblCompres        = 0xC132,
-    /*warning: unspecified options:*/
-    M4MP4W_maxFileSize          = 0xC152,
-    M4MP4W_CamcoderVersion      = 0xC153, /*000 to 999 !*/
-    M4MP4W_estimateAudioSize    = 0xC154, /*audio AUs are processed after the video, */
-    /*this option MUST NOT be set if non constant audio
-    frame size (e.g. if SID)*/
-    M4MP4W_embeddedString       = 0xC155,
-    M4MP4W_integrationTag       = 0xC156,
-    M4MP4W_maxFileDuration      = 0xC157,
-    M4MP4W_setFtypBox           = 0xC158,
-    M4MP4W_DSI                  = 0xC159,
-    /* H.264 trimming */
-    M4MP4W_MUL_PPS_SPS          = 0xC160,
-    /* H.264 trimming */
-} M4MP4W_OptionID;
-
-/**
- ******************************************************************************
- * Audio & video stream IDs
- ******************************************************************************
- */
-#define AudioStreamID 1
-#define VideoStreamID 2
-
-/**
- ******************************************************************************
- * Default parameters values, that can be modified by M4MP4W_setOption
- ******************************************************************************
- */
-#define M4MP4W_DefaultWidth 320
-#define M4MP4W_DefaultHeight 240
-#define M4MP4W_DefaultMaxAuSize  4096 /*bytes*/
-#define M4MP4W_DefaultMaxChunkSize 100000 /*bytes*/
-#define M4MP4W_DefaultInterleaveDur 0 /*bytes*/
-
-
-/**
- ******************************************************************************
- * structure    M4MP4W_StreamIDsize
- * @brief        Video plane size
- ******************************************************************************
- */
-typedef struct
-{
-    M4SYS_StreamID streamID;
-    M4OSA_UInt16    height;
-    M4OSA_UInt16    width;
-} M4MP4W_StreamIDsize;
-
-/**
- ******************************************************************************
- * structure    M4MP4W_TrackData
- * @brief       Internal core MP4 writer track structure
- ******************************************************************************
- */
-typedef struct
-{
-    M4SYS_StreamType    trackType;
-    M4OSA_UInt32        timescale;          /* T (video=1000), (AMR8=8000), (AMR16=16000)*/
-    M4OSA_UInt32        sampleSize;         /* S (video=0)*/
-    M4OSA_UInt32        sttsTableEntryNb;   /* J (audio=1)*/
-    M4MP4W_Time32        lastCTS;           /* CTS of the previous AU,
-                                               init to 0.Gives duration at the end.*/
-    M4OSA_UInt32        sampleNb;           /* K (audio=F)*/
-} M4MP4W_TrackData;
-
-/**
- ******************************************************************************
- * structure    M4MP4W_AudioTrackData
- * @brief       Internal core MP4 writer audio specific structure
- ******************************************************************************
- */
-typedef struct
-{
-    M4MP4W_State            microState;
-    M4MP4W_TrackData        CommonData;
-    M4OSA_UChar**           Chunk;
-    M4OSA_UInt32*           chunkSizeTable;
-#ifndef _M4MP4W_MOOV_FIRST
-    M4OSA_UInt32*           chunkOffsetTable;
-#endif /*_M4MP4W_MOOV_FIRST*/
-    M4OSA_UInt32*           chunkSampleNbTable;
-    M4OSA_UInt32*           chunkTimeMsTable;
-    M4OSA_UInt32            currentChunk;       /* Init to 0*/
-    M4OSA_UInt32            currentPos;         /* Init to 0 */
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-    M4OSA_UInt32            currentStsc;        /* Init to 0 */
-#endif
-    M4MP4W_Time32           sampleDuration;     /* Check (AMR8=160), (AMR16=320)*/
-    M4OSA_UInt32            MaxChunkSize;       /* Init to M4MP4W_Mp4FileData.MaxChunkSize*/
-    M4OSA_UInt32            MaxAUSize;          /* Init to M4MP4W_Mp4FileData.MaxAUSize*/
-    M4OSA_UInt32            LastAllocatedChunk;
-    /* previously, audio au size was supposed constant,
-     * which is actually not the case if silences (sid).*/
-    /* at first audio au, sampleSize is set. It is later reset to 0 if non constant size.*/
-    /* So sampleSize should be tested to know weither or not there is a TABLE_STSZ. */
-    M4OSA_UInt32*           TABLE_STSZ; /* table size is 4K*/
-    M4OSA_UInt32            nbOfAllocatedStszBlocks;
-    M4OSA_UInt32*           TABLE_STTS;
-    M4OSA_UInt32            nbOfAllocatedSttsBlocks;
-    M4OSA_UInt32            maxBitrate;     /*not used in amr case*/
-    M4OSA_UInt32            avgBitrate;     /*not used in amr case*/
-    M4OSA_UChar*            DSI;            /* Decoder Specific Info: May be M4OSA_NULL
-                                            (defaulted) for AMR */
-    M4OSA_UInt8             dsiSize;        /* DSI size, always 9 bytes for AMR */
-} M4MP4W_AudioTrackData;
-
-
-/**
- ******************************************************************************
- * structure    M4MP4W_VideoTrackData
- * @brief        Internal core MP4 writer video specific structure
- ******************************************************************************
- */
-typedef struct
-{
-    M4MP4W_State            microState;
-    M4MP4W_TrackData        CommonData;
-    M4OSA_UChar**           Chunk;
-    M4OSA_UInt32*           chunkSizeTable;
-#ifndef _M4MP4W_MOOV_FIRST
-    M4OSA_UInt32*           chunkOffsetTable;
-#endif /*_M4MP4W_MOOV_FIRST*/
-    M4OSA_UInt32*           chunkSampleNbTable;
-    M4MP4W_Time32*          chunkTimeMsTable;
-    M4OSA_UInt32            currentChunk;            /* Init to 0*/
-    M4OSA_UInt32            currentPos ;             /* Init to 0*/
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-    M4OSA_UInt32            currentStsc;             /* Init to 0*/
-#endif
-    M4OSA_UInt32            stssTableEntryNb ;       /* N*/
-    M4OSA_UInt16            width;                   /* X*/
-    M4OSA_UInt16            height;                  /* Y*/
-    M4OSA_UInt32*           TABLE_STTS;              /* table size is J*/
-    M4OSA_UInt32            nbOfAllocatedSttsBlocks;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-    M4OSA_UInt16*           TABLE_STSZ;              /* table size is 2K*/
-#else
-    M4OSA_UInt32*           TABLE_STSZ;              /* table size is 4K*/
-#endif
-    M4OSA_UInt32            nbOfAllocatedStszBlocks;
-    M4OSA_UInt32*           TABLE_STSS;              /* table size is N*/
-    M4OSA_UInt32            nbOfAllocatedStssBlocks;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-    M4OSA_UInt32            MaxAUperChunk;           /*Init to 0, i.e. not used*/
-#endif
-    M4OSA_UInt32            MaxChunkSize;            /*Init to M4MP4W_Mp4FileData.MaxChunkSize*/
-    M4OSA_UInt32            MaxAUSize;               /*Init to M4MP4W_Mp4FileData.MaxAUSize*/
-    M4OSA_UInt32            LastAllocatedChunk;
-    M4OSA_UInt32            maxBitrate;
-    M4OSA_UInt32            avgBitrate;
-    M4OSA_UChar*            DSI;            /* Decoder Specific Info: May be M4OSA_NULL
-                                            (defaulted) for H263*/
-    M4OSA_UInt8             dsiSize;        /* DSI size, always 7 bytes for H263 */
-} M4MP4W_VideoTrackData;
-
-/**
- ******************************************************************************
- * structure    M4MP4W_Mp4FileData
- * @brief       Internal core MP4 writer private context structure
- ******************************************************************************
- */
-typedef struct
-{
-    M4MP4W_State                  state;
-    M4OSA_Char*                   url;
-    M4OSA_UInt32                  duration;    /* D in ms, max duration of audio&video*/
-    M4OSA_UInt32                  filesize;    /* actual filesize in bytes*/
-    M4MP4W_AudioTrackData*        audioTrackPtr;
-    M4OSA_Bool                    hasAudio;
-    M4MP4W_VideoTrackData*        videoTrackPtr;
-    M4OSA_Bool                    hasVideo;
-    M4OSA_UInt32                  MaxChunkSize;       /* Init to 100000*/
-    M4OSA_UInt32                  MaxAUSize;          /* Init to 4096*/
-    M4OSA_UInt32                  MaxFileSize;        /* Init to 0, i.e. not used*/
-    M4MP4W_Time32                 InterleaveDur;      /* Init to 0, i.e. not used, ms*/
-    /* M4MP4W_WriteCallBack            PreWriteCallBack;*/    /*Init to M4OSA_NULL*/
-    /* M4MP4W_WriteCallBack            PostWriteCallBack;*/ /*Init to M4OSA_NULL*/
-    M4OSA_FileWriterPointer*      fileWriterFunctions;
-    M4OSA_FileReadPointer*        fileReaderFunctions;
-    M4OSA_UInt32                  camcoderVersion;
-    M4OSA_Bool                    estimateAudioSize;  /* default is false*/
-    M4OSA_UInt32                  audioMsChunkDur;    /* in ms, set only if estimateAudioSize
-                                                         is true*/
-    M4OSA_UInt32                  audioMsStopTime;    /* time to stop audio, set only if
-                                                         estimateAudioSize is true*/
-    M4OSA_Context                 fileWriterContext;
-#ifndef _M4MP4W_MOOV_FIRST
-    M4OSA_UInt32                  absoluteCurrentPos; /* new field for offset update*/
-#endif /*_M4MP4W_MOOV_FIRST*/
-    M4OSA_UChar*                  embeddedString;     /* 16 bytes string, default value
-                                                         writen if NULL*/
-    M4OSA_UChar*                  integrationTag;     /* 60 bytes string, memset to 0 if NULL */
-    M4OSA_UInt32                  MaxFileDuration;    /* Init to 0, i.e. not used*/
-    M4MP4C_FtypBox                ftyp;               /* ftyp atom, if not defined set major_brand
-                                                            = 0, will use default box */
-#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
-    M4OSA_Char*                    safetyFileUrl;
-    M4OSA_Bool                        cleanSafetyFile;
-#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-    M4OSA_Bool                               bMULPPSSPS;
-} M4MP4W_Mp4FileData;
-
-#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*M4MP4W_TYPES_H*/
-
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h
deleted file mode 100755
index fbe7abb..0000000
--- a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4MP4W_Utils.h
- * @brief   Utilities and private functions declaration for the MP4 writer
- ******************************************************************************
- */
-
-#ifndef M4MP4W_UTILS_H
-#define M4MP4W_UTILS_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "NXPSW_CompilerSwitches.h"
-
-#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
-
-/* includes */
-#include "M4OSA_Types.h"
-#include "M4OSA_FileWriter.h"
-
-
-/**
- ******************************************************************************
- * Utility functions to write data in big endian
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_putByte(M4OSA_UChar c,    M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context);
-M4OSA_ERR M4MP4W_putBE16(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context);
-M4OSA_ERR M4MP4W_putBE24(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context);
-M4OSA_ERR M4MP4W_putBE32(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context);
-
-/**
- ******************************************************************************
- * Write a bulk of data into the specified file, size is given in bytes
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_putBlock(const M4OSA_UChar* Block, M4OSA_UInt32 size,
-                          M4OSA_FileWriterPointer* fileFunction, M4OSA_Context context);
-
-/**
- ******************************************************************************
- * Convert the 'nb' unsigned integers in 'tab' table from LE into BE
- ******************************************************************************
- */
-void M4MP4W_table32ToBE(M4OSA_UInt32* tab, M4OSA_UInt32 nb);
-
-/**
- ******************************************************************************
- * Convert an unsigned 32 bits integer from LE into BE
- ******************************************************************************
- */
-void M4MP4W_convertInt32BE(M4OSA_UInt32* valPtr);
-
-/**
- ******************************************************************************
- * Re-allocation function
- ******************************************************************************
- */
-void* M4MP4W_realloc(M4OSA_MemAddr32 ptr, M4OSA_UInt32 oldSize, M4OSA_UInt32 newSize);
-
-/**
- ******************************************************************************
- * De-allocate the context
- * This method is no longer in the writer external interface, but is called from
- * the function M4MP4W_closeWrite
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_freeContext(M4OSA_Context context);
-
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-/**
- ******************************************************************************
- * Put Hi and Lo u16 part in a u32 variable
- ******************************************************************************
- */
-M4OSA_Void M4MP4W_put32_Hi(M4OSA_UInt32* tab, M4OSA_UInt16 Hi);
-M4OSA_Void M4MP4W_put32_Lo(M4OSA_UInt32* tab, M4OSA_UInt16 Lo);
-M4OSA_UInt16 M4MP4W_get32_Hi(M4OSA_UInt32* tab);
-M4OSA_UInt16 M4MP4W_get32_Lo(M4OSA_UInt32* tab);
-#endif
-
-#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*M4MP4W_UTILS_H*/
-
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h
deleted file mode 100755
index b73a223..0000000
--- a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4MP4W_Writer.h
- * @brief   Core MP4 writer interface
- * @note    This file declares the MP4 writer interface functions.
- *          The MP4 writer specific types are defined in file M4MP4W_Types.h
- ******************************************************************************
- */
-#ifndef M4MP4W_WRITER_H
-#define M4MP4W_WRITER_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "NXPSW_CompilerSwitches.h"
-
-#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
-
-/* includes */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_FileWriter.h"
-#include "M4OSA_FileReader.h"
-#include "M4SYS_AccessUnit.h"
-#include "M4MP4W_Types.h"
-
-/**
- ******************************************************************************
- * MP4W Errors & Warnings definition
- ******************************************************************************
- */
-#define M4WAR_MP4W_OVERSIZE         M4OSA_ERR_CREATE(M4_WAR, M4MP4_WRITER ,0x000001)
-#define M4WAR_MP4W_NOT_EVALUABLE    M4OSA_ERR_CREATE(M4_WAR, M4MP4_WRITER ,0x000002)
-
-/**
- ******************************************************************************
- * @brief    Get MP4W version
- * @param    major            (OUT) Pointer to the 'major' version number.
- * @param    minor            (OUT) Pointer to the 'minor' version number.
- * @param    revision         (OUT) Pointer to the 'revision' number.
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    At least one parameter is null
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_getVersion(M4OSA_UInt8* major,
-                            M4OSA_UInt8* minor,
-                            M4OSA_UInt8* revision);
-
-/**
- ******************************************************************************
- * @brief    Initiation of the MP4 file creation
- * @param    contextPtr             (OUT) Pointer to the MP4 writer context to create.
- * @param    outputFileDescriptor   (IN)  Descriptor of the output file to open.
- * @param    fileWriterFunction     (IN)  Pointer to structure containing the set of
- *                                          OSAL file write functions.
- * @param    tempFileDescriptor     (IN)  Descriptor of the temporary file to open.
- * @param    fileReaderFunction     (IN)  Pointer to structure containing the set of
- *                                          OSAL file read functions.
- * @return    M4NO_ERROR:         No error
- * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
- * @return    M4ERR_ALLOC:        Memory allocation failed
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_openWrite( M4OSA_Context*                  contextPtr,
-                            void*                           outputFileDescriptor,
-                            M4OSA_FileWriterPointer*        fileWriterFunction,
-                            void*                           tempFileDescriptor,
-                            M4OSA_FileReadPointer*          fileReaderFunction );
-
-/**
- ******************************************************************************
- * @brief    Add a new track
- * @param    context              (IN/OUT)  MP4 writer context.
- * @param    streamDescPtr        (IN)      Pointer to the structure containing the
-                                            parameters for the new track.
- * @return    M4NO_ERROR:         No error
- * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
- * @return    M4ERR_ALLOC:        Memory allocation failed
- * @return    M4ERR_STATE:        Invalid state
- * @return    M4ERR_BAD_CONTEXT:  An audio (resp.video) stream has already been added
- *                                to this context while attempting to add another one,
- *                                which is forbidden.
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_addStream( M4OSA_Context                context,
-                            M4SYS_StreamDescription*     streamDescPtr);
-
-/**
- ******************************************************************************
- * @brief   Signal to the core MP4 writer that there is no more tracks to add
- * @param   context             (IN/OUT) MP4 writer context.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is null or incorrect
- * @return  M4ERR_ALLOC:        Memory allocation failed
- * @return  M4ERR_STATE:        Invalid state
- * @return  M4ERR_BAD_CONTEXT:  Audio size estimation is required but not two streams
- *                              have been added.
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_startWriting( M4OSA_Context context);
-
-/**
- ******************************************************************************
- * @brief   Asks the core MP4 writer to initiate the access unit creation in
- *          the streamID track
- * @param   context             (IN/OUT) MP4 writer context.
- * @param   streamID            (IN) Stream ID of the track.
- * @param   auPtr               (IN/OUT) Access unit.
- * @return    M4NO_ERROR:         No error
- * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
- * @return    M4ERR_BAD_STREAM_ID:Unknown stream ID
- * @return    M4ERR_ALLOC:        Memory allocation failed
- * @return    M4ERR_STATE:        Invalid state
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_startAU( M4OSA_Context        context,
-                          M4SYS_StreamID       streamID,
-                          M4SYS_AccessUnit*    auPtr);
-
-/**
- ******************************************************************************
- * @brief   Ask the core MP4 writer to write the access unit in the streamID track
- * @note    If M4MP4W_WAR_OVERSIZE is returned, M4MP4W_startAU must not be called anymore,
- *          but directly M4MP4W_closeWrite().
- * @param   context             (IN/OUT)   MP4 writer context.
- * @param   streamID            (IN)       Stream ID of the track.
- * @param   auPtr               (IN/OUT)   Access unit.
- * @return    M4NO_ERROR:                 No error
- * @return    M4ERR_PARAMETER:            At least one parameter is null or incorrect
- * @return    M4ERR_BAD_STREAM_ID:        Unknown stream ID
- * @return    M4ERR_ALLOC:                Memory allocation failed
- * @return    M4ERR_STATE:                Invalid state
- * @return    M4WAR_MP4W_NOT_EVALUABLE:   It is not possible to evaluate audio size if audio
- *                                        samples don't have a constant size.
- * @return    M4WAR_MP4W_OVERSIZE:        Max file size was reached
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_processAU( M4OSA_Context        context,
-                            M4SYS_StreamID       streamID,
-                            M4SYS_AccessUnit*    auPtr);
-
-/**
- ******************************************************************************
- * @brief     Close the MP4 file
- * @note      In previous versions of the MP4 writer, the M4MP4W_freeContext method
- *            was in the interface, which is not the case anymore.
- *            The context is now always deallocated in the M4MP4W_closeWrite function.
- * @param     context             (IN/OUT) MP4 writer context.
- * @return    M4NO_ERROR:         No error
- * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_closeWrite( M4OSA_Context context);
-
-/**
- ******************************************************************************
- * @brief    Ask the core MP4 writer to return the value associated with the optionID
- * @param    context                (IN)    MP4 writer context.
- * @param    option                 (IN)    Option ID.
- * @param    valuePtr               (OUT)   Pointer to the option value.
- * @return    M4NO_ERROR:             No error
- * @return    M4ERR_PARAMETER:        At least one parameter is null or incorrect
- * @return    M4ERR_NOT_IMPLEMENTED:  Not implemented in the current version
- * @return    M4ERR_BAD_OPTION_ID:    Unknown optionID
- * @return    M4ERR_BAD_STREAM_ID:    Bad stream ID in the option value
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_getOption( M4OSA_Context        context,
-                            M4OSA_OptionID        option,
-                            M4OSA_DataOption    *valuePtr);
-
-/**
- ******************************************************************************
- * @brief    Ask the core MP4 writer to set the value associated with the optionID.
- * @param    context              (IN/OUT)  MP4 writer context.
- * @param    option               (IN)      Option ID.
- * @param    value                (IN)      Option value.
- * @return    M4NO_ERROR:             No error
- * @return    M4ERR_PARAMETER:        At least one parameter is null or incorrect
- * @return    M4ERR_NOT_IMPLEMENTED:  Not implemented in the current version
- * @return    M4ERR_BAD_OPTION_ID:    Unknown optionID
- * @return    M4ERR_BAD_STREAM_ID:    Bad stream ID in the option value
- * @return    M4ERR_ALLOC:            A memory allocation failed
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_setOption( M4OSA_Context       context,
-                            M4OSA_OptionID      option,
-                            M4OSA_DataOption    value);
-
-/**
- ******************************************************************************
- * @brief    Ask the core MP4 writer to return its state.
- * @note     By selecting a specific streamID (not null), the caller can obtain
- *           the state of a specific stream. By using 0 as streamID the returned
- *           state is not stream specific.
- * @param    context                (IN/OUT) MP4 writer context.
- * @param    context                (IN)     Pointer to the state enumeration.
- * @param    context                (IN/OUT) streamID of the stream to retrieve the
- *                                           micro-state (0 for global state).
- * @return    M4NO_ERROR:             No error
- * @return    M4ERR_BAD_STREAM_ID:    Unknown stream ID
- * @return    M4ERR_PARAMETER:        At least one parameter is null or incorrect
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_getState( M4OSA_Context    context,
-                           M4MP4W_State*    statePtr,
-                           M4SYS_StreamID   streamID);
-
-/**
- ******************************************************************************
- * @brief    Get the currently expected file size
- * @param    context             (IN/OUT) MP4 writer context.
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    At least one parameter is null
- ******************************************************************************
- */
-M4OSA_ERR M4MP4W_getCurrentFileSize( M4OSA_Context        context,
-                                     M4OSA_UInt32*        currentFileSize);
-
-#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-
-#endif /*M4MP4W_WRITER_H*/
-
diff --git a/libvideoeditor/vss/3gpwriter/src/Android.mk b/libvideoeditor/vss/3gpwriter/src/Android.mk
deleted file mode 100755
index 8ab32ba..0000000
--- a/libvideoeditor/vss/3gpwriter/src/Android.mk
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-#
-# lib3gpwriter
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE:= libvideoeditor_3gpwriter
-
-LOCAL_SRC_FILES:=          \
-      M4MP4W_Interface.c \
-      M4MP4W_Utils.c \
-      M4MP4W_Writer.c
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils             \
-    libutils              \
-    libvideoeditor_osal   \
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/3gpwriter/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/common/inc
-
-LOCAL_SHARED_LIBRARIES += libdl
-
-# All of the shared libraries we link against.
-LOCAL_LDLIBS := \
-    -lpthread -ldl
-
-LOCAL_CFLAGS += -Wno-multichar \
-    -DDUPLICATE_STTS_IN_LAST_AU
-
-include $(BUILD_STATIC_LIBRARY)
-
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c
deleted file mode 100755
index c2c5250..0000000
--- a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c
+++ /dev/null
@@ -1,914 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4MP4W_Interface.c
- * @brief    3GPP file writer interface
- * @note    This implementation follows the common interface defined
- *          in "M4WRITER_common.h".
- ******************************************************************************
-*/
-
-#include "NXPSW_CompilerSwitches.h"
-
-/**
- * OSAL includes */
-#include "M4OSA_Types.h"            /**< OSAL basic types definiton */
-#include "M4OSA_FileWriter.h"        /**< Include for OSAL file accesses implementation */
-#include "M4OSA_Memory.h"            /**< Include for OSAL memory accesses implementation */
-#include "M4OSA_Debug.h"            /**< OSAL debug tools */
-
-/**
- * Writer includes */
-#include "M4WRITER_common.h"        /**< Definition of the writer common interface that
-                                          this module follows */
-
-#ifdef _M4MP4W_USE_CST_MEMORY_WRITER
-#include "M4MP4W_Types_CstMem.h"    /**< MP4/3GP core writer types */
-#include "M4MP4W_Writer_CstMem.h"    /**< MP4/3GP core writer functions */
-#else
-#include "M4MP4W_Types.h"            /**< MP4/3GP core writer types */
-#include "M4MP4W_Writer.h"            /**< MP4/3GP core writer functions */
-#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
-
-/**
- * Specific errors for this module */
-#define M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE \
-                M4OSA_ERR_CREATE(M4_ERR, M4WRITER_3GP, 0x000001)
-
-
-/**
- ******************************************************************************
- * structure    M4WRITER_3GP_InternalContext
- * @brief        This structure defines the writer context (private)
- * @note        This structure is used for all writer calls to store the context
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Context    pMP4Context;    /**< MP4 writer context */
-    M4OSA_UInt32    maxAUsizes;        /**< the maximum AU size possible */
-} M4WRITER_3GP_InternalContext;
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_openWrite(M4WRITER_Context* pContext, void* pWhat,
- *                                   M4OSA_FileWriterPointer* pFileWriterPointer)
- * @brief    Open a writer session.
- * @note
- * @param    pContext:     (OUT) Execution context of the 3GP writer, allocated by this function.
- * @param    outputFileDescriptor (IN)  Descriptor of the output file to create.
- * @param    fileWriterFunction     (IN)  Pointer to structure containing the set of OSAL
- *                                       file write functions.
- * @param    tempFileDescriptor     (IN)  Descriptor of the temporary file to open
- *                                        (NULL if not used)
- * @param    fileReaderFunction     (IN)  Pointer to structure containing the set of OSAL file read
- *                                      functions (NULL if not used)
- * @return    M4NO_ERROR:  there is no error
- * @return    M4ERR_ALLOC: there is no more available memory
- * @return    M4ERR_PARAMETER: pContext or pFilePtrFct is M4OSA_NULL (debug only)
- * @return    any error returned by the MP4 core writer openWrite (Its coreID is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_openWrite( M4WRITER_Context* pContext,
-                                  void* outputFileDescriptor,
-                                  M4OSA_FileWriterPointer* pFileWriterPointer,
-                                  void* tempFileDescriptor,
-                                  M4OSA_FileReadPointer* pFileReaderPointer )
-{
-    M4WRITER_3GP_InternalContext* apContext;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE1_0("M4WRITER_3GP_openWrite");
-
-    /**
-     *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,
-         "M4WRITER_3GP_openWrite: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWriterPointer),M4ERR_PARAMETER,
-         "M4WRITER_3GP_openWrite: pFileWriterPointer is M4OSA_NULL");
-
-    /**
-     *    Allocate memory for the context */
-    *pContext=M4OSA_NULL;
-    apContext = (M4WRITER_3GP_InternalContext*)M4OSA_32bitAlignedMalloc(
-                    sizeof(M4WRITER_3GP_InternalContext),
-                    M4WRITER_3GP,
-                    (M4OSA_Char *)"M4WRITER_3GP_InternalContext");
-
-    if (M4OSA_NULL == apContext)
-    {
-        M4OSA_TRACE1_0("M4WRITER_3GP_openWrite:\
-             unable to allocate context, returning M4ERR_ALLOC");
-        return (M4OSA_ERR)M4ERR_ALLOC;
-    }
-
-    /**
-     *    Reset context variables */
-    apContext->pMP4Context = M4OSA_NULL;
-    apContext->maxAUsizes = 0;
-
-    /**
-     *    Return the writer context */
-    *pContext = (M4WRITER_Context *)apContext;
-
-    /**
-     *    Launch the openWrite of the MP4 writer */
-    M4OSA_TRACE3_0("M4WRITER_3GP_openWrite: calling M4MP4W_openWrite()");
-
-    err = M4MP4W_openWrite(&apContext->pMP4Context, outputFileDescriptor,
-            pFileWriterPointer, tempFileDescriptor, pFileReaderPointer );
-
-    if (M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_TRACE1_1("M4WRITER_3GP_openWrite: "
-                       "M4MP4W_openWrite returns error 0x%x", err);
-    }
-
-    M4OSA_TRACE2_1("M4WRITER_3GP_openWrite: returning 0x%x", err);
-
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_startWriting(M4WRITER_Context pContext)
- * @brief    Indicates to the writer that the setup session is ended and that
- *          we will start to write.
- * @note
- * @param     pContext:   (IN) Execution context of the 3GP writer,
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
- * @return    any error returned by the MP4 core writer startWriting (Its
- *            coreID is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_startWriting(M4WRITER_Context pContext)
-{
-    M4WRITER_3GP_InternalContext* apContext =
-                (M4WRITER_3GP_InternalContext*)pContext;
-
-    M4OSA_ERR err;
-
-    M4OSA_TRACE1_1("M4WRITER_3GP_startWriting: pContext=0x%x", pContext);
-
-    /**
-     *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
-         "M4WRITER_3GP_startWriting: pContext is M4OSA_NULL");
-
-    /**
-     *    Call the MP4 core writer */
-    M4OSA_TRACE3_0("M4WRITER_3GP_startWriting: calling M4MP4W_startWriting()");
-    err = M4MP4W_startWriting(apContext->pMP4Context);
-    if (M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_TRACE1_1("M4MP4W_startWriting returns error 0x%x", err);
-    }
-
-    M4OSA_TRACE2_1("M4WRITER_3GP_startWriting: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_addStream(
- *     M4WRITER_Context pContext,
- *     M4SYS_StreamDescription *pStreamDescription)
- * @brief     Add a stream (audio or video).
- * @note      Decoder specific info properties are correctly set before calling
- *            the core writer add function
- * @param     pContext:   (IN) Execution context of the 3GP writer,
- * @param     streamDescription:    (IN) stream description.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext or pStreamDescription is M4OSA_NULL
- *            (debug only)
- * @return    any error returned by the MP4 core writer addStream
- *            (Its coreID is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_addStream(M4WRITER_Context pContext,
-                                 M4SYS_StreamDescription* pStreamDescription)
-{
-    M4WRITER_3GP_InternalContext *apContext =
-        (M4WRITER_3GP_InternalContext *)pContext;
-
-    M4OSA_ERR err;
-    M4WRITER_StreamVideoInfos *pVideoInfo = M4OSA_NULL;
-    M4WRITER_StreamAudioInfos *pAudioInfo = M4OSA_NULL;
-    M4MP4W_StreamIDsize sizeValue;
-
-    M4OSA_TRACE1_2("M4WRITER_3GP_addStream: pContext=0x%x, "
-                   "pStreamDescription=0x%x",
-                   pContext, pStreamDescription);
-
-    /**
-     *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
-         "M4WRITER_3GP_addStream: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDescription),M4ERR_PARAMETER,
-         "M4WRITER_3GP_addStream: pStreamDescription is M4OSA_NULL");
-
-    /**
-     *    Adapt audio/video stream infos */
-    switch (pStreamDescription->streamType)
-    {
-        case M4SYS_kMPEG_4:
-        case M4SYS_kH264:
-        case M4SYS_kH263:
-            M4OSA_TRACE3_1("M4WRITER_3GP_addStream: "
-                    "adding a Video stream (streamType=0x%x)",
-                    pStreamDescription->streamType);
-            /**
-             *    Common descriptions */
-            pStreamDescription->streamID = VideoStreamID;    /**< The only values checked by our
-                                                                  core writer are streamID */
-            pStreamDescription->timeScale = 1000;            /**< and timeScale */
-
-/* Not recommended for video editing -> write explicitely the 'bitr' box into 'd263' */
-/* Rem : it is REL 5 of 3gpp documentation */
-//            /**
-//             * Average bit-rate must not be set in H263 to be compatible with Platform4 */
-//            if (M4SYS_kH263 == pStreamDescription->streamType)
-//            {
-//                pStreamDescription->averageBitrate = -1;
-//            }
-
-            /**
-             *    Decoder specific info */
-            pVideoInfo = (M4WRITER_StreamVideoInfos *)pStreamDescription->decoderSpecificInfo;
-            pStreamDescription->decoderSpecificInfoSize = pVideoInfo->Header.Size;
-            pStreamDescription->decoderSpecificInfo = (M4OSA_MemAddr32)pVideoInfo->Header.pBuf;
-            M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Video: DSI=0x%x, DSIsize=%d",
-                 pVideoInfo->Header.pBuf, pVideoInfo->Header.Size);
-            break;
-
-        case M4SYS_kAMR:
-        case M4SYS_kAMR_WB:
-        case M4SYS_kAAC:
-        case M4SYS_kEVRC:
-            M4OSA_TRACE3_1("M4WRITER_3GP_addStream: adding an Audio stream (streamType=0x%x)",
-                 pStreamDescription->streamType);
-            /**
-             *    Common descriptions */
-            pStreamDescription->streamID = AudioStreamID;    /**< The only value checked by our
-                                                                 core writer is streamID */
-
-            /**
-             *    Decoder specific info */
-            pAudioInfo = (M4WRITER_StreamAudioInfos *)pStreamDescription->decoderSpecificInfo;
-            pStreamDescription->decoderSpecificInfoSize = pAudioInfo->Header.Size;
-            pStreamDescription->decoderSpecificInfo = (M4OSA_MemAddr32)pAudioInfo->Header.pBuf;
-            M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Audio: DSI=0x%x, DSIsize=%d",
-                 pAudioInfo->Header.pBuf, pAudioInfo->Header.Size);
-            break;
-
-        default:
-            M4OSA_TRACE1_1("M4WRITER_3GP_addStream:\
-                 returning M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE (streamType=0x%x)",
-                     pStreamDescription->streamType);
-            return (M4OSA_ERR)M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE;
-            break;
-    }
-
-    /**
-     *    Call the MP4 core writer */
-    M4OSA_TRACE3_0("M4WRITER_3GP_addStream: calling M4MP4W_addStream()");
-    err = M4MP4W_addStream(apContext->pMP4Context,pStreamDescription);
-    if (M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_TRACE1_1("M4WRITER_3GP_addStream: M4MP4W_addStream returns error 0x%x", err);
-        M4OSA_TRACE1_1("M4WRITER_3GP_addStream: returning 0x%x", err);
-        return (err);
-    }
-
-    /**
-     *    For Video, set the M4MP4W_trackSize Option */
-    switch (pStreamDescription->streamType)
-    {
-        case M4SYS_kMPEG_4:
-        case M4SYS_kH264:
-        case M4SYS_kH263:
-            sizeValue.streamID = VideoStreamID;
-            sizeValue.height = (M4OSA_UInt16)(pVideoInfo->height);
-            sizeValue.width  = (M4OSA_UInt16)(pVideoInfo->width);
-            M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Video: height=%d, width=%d",
-                 sizeValue.height, sizeValue.width);
-
-            M4OSA_TRACE3_0("M4WRITER_3GP_addStream: calling M4MP4W_setOption(M4MP4W_trackSize)");
-            err = M4MP4W_setOption( apContext->pMP4Context, M4MP4W_trackSize,
-                 (M4OSA_DataOption)&sizeValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4WRITER_3GP_addStream: M4MP4W_setOption returns error 0x%x",
-                     err);
-            }
-            break;
-        default:
-            break;
-    }
-
-    M4OSA_TRACE2_1("M4WRITER_3GP_addStream: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_closeWrite(M4WRITER_Context pContext)
- * @brief    Close the writer. The context is freed here.
- * @note
- * @param     pContext:   (IN) Execution context of the 3GP writer,
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
- * @return    any error returned by the MP4 core writer closeWrite (Its coreID
- *            is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_closeWrite(M4WRITER_Context pContext)
-{
-    M4WRITER_3GP_InternalContext* apContext=(M4WRITER_3GP_InternalContext*)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4OSA_TRACE1_1("M4WRITER_3GP_closeWrite called with pContext=0x%x", pContext);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
-         "M4WRITER_3GP_closeWrite: pContext is M4OSA_NULL");
-
-    /**
-     *    Call the MP4 core writer */
-    if (M4OSA_NULL != apContext->pMP4Context)
-    {
-        M4OSA_TRACE3_0("M4WRITER_3GP_closeWrite: calling M4MP4W_closeWrite()");
-        err = M4MP4W_closeWrite(apContext->pMP4Context);
-        if (M4OSA_ERR_IS_ERROR(err))
-        {
-            M4OSA_TRACE1_1("M4WRITER_3GP_closeWrite: M4MP4W_closeWrite returns error 0x%x", err);
-        }
-    }
-
-    /**
-     *    Deallocate our own context */
-    free(apContext);
-
-    M4OSA_TRACE2_1("M4WRITER_3GP_closeWrite: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_setOption(
- *        M4WRITER_Context pContext, M4OSA_UInt32 optionID,
- *        M4OSA_DataOption optionValue)
- * @brief     This function asks the writer to set the value associated with
- *            the optionID. The caller is responsible for allocating/
- *            de-allocating the memory of the value field.
- * @note      The options handled by the component depend on the implementation
- *            of the component.
- * @param     pContext:     (IN) Execution context of the 3GP writer,
- * @param     pptionId:     (IN) ID of the option to set.
- * @param     OptionValue : (IN) Value of the option to set.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
- * @return    M4ERR_BAD_OPTION_ID: the ID of the option is not valid.
- * @return    any error returned by the MP4 core writer setOption (Its coreID
- *            is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_setOption(
-        M4WRITER_Context pContext, M4OSA_UInt32 optionID,
-        M4OSA_DataOption optionValue)
-{
-    M4WRITER_3GP_InternalContext* apContext =
-            (M4WRITER_3GP_InternalContext*)pContext;
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4MP4W_memAddr memval;
-    M4SYS_StreamIDValue optval;
-
-    M4OSA_TRACE2_3("M4WRITER_3GP_setOption: pContext=0x%x, optionID=0x%x,\
-         optionValue=0x%x", pContext, optionID, optionValue);
-
-    /**
-     *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL==apContext),M4ERR_PARAMETER,
-         "M4WRITER_3GP_setOption: pContext is M4OSA_NULL");
-
-    switch (optionID)
-    {
-        /**
-         *    Maximum Access Unit size */
-        case M4WRITER_kMaxAUSize:
-            M4OSA_TRACE2_0("setting M4WRITER_kMaxAUSize option");
-            err = M4MP4W_setOption(
-                    apContext->pMP4Context,M4MP4W_maxAUsize, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxAUsize) "
-                               "returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    Maximum chunck size */
-        case M4WRITER_kMaxChunckSize:
-            M4OSA_TRACE2_0("setting M4WRITER_kMaxChunckSize option");
-            err = M4MP4W_setOption(
-                apContext->pMP4Context,M4MP4W_maxChunkSize, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxChunkSize)\
-                     returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    File string signature */
-        case M4WRITER_kEmbeddedString:
-            M4OSA_TRACE2_0("setting M4WRITER_kEmbeddedString option");
-            /* The given M4OSA_DataOption must actually
-               be a text string */
-            memval.addr = (M4OSA_MemAddr32)optionValue;
-            /**< this is max string size copied by the core */
-            memval.size = 16;
-            err = M4MP4W_setOption(
-                apContext->pMP4Context,M4MP4W_embeddedString, &memval);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_embeddedString)\
-                     returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    File integration tag */
-        case M4WRITER_kIntegrationTag:
-            M4OSA_TRACE2_0("setting M4WRITER_kIntegrationTag option");
-            /* The given M4OSA_DataOption must actually
-               be a text string */
-            memval.addr = (M4OSA_MemAddr32)optionValue;
-            /**< this is max string size copied by the core */
-            memval.size = strlen((const char *)optionValue);
-            err = M4MP4W_setOption(
-                apContext->pMP4Context,M4MP4W_integrationTag, &memval);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_integrationTag)"
-                               " returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    File version signature */
-        case M4WRITER_kEmbeddedVersion:
-            M4OSA_TRACE2_0("setting M4WRITER_kEmbeddedVersion option");
-            /* The given M4OSA_DataOption must actually
-               be a version number */
-
-            /**< Here 0 means both streams */
-            optval.streamID = 0;
-            /**< version number */
-            optval.value = *(M4OSA_UInt32*)optionValue;
-            err = M4MP4W_setOption(
-                apContext->pMP4Context,M4MP4W_CamcoderVersion, &optval);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_CamcoderVersion)"
-                               " returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    Some options are read-only */
-        case M4WRITER_kFileSize:
-        case M4WRITER_kFileSizeAudioEstimated:
-            M4OSA_TRACE2_1("trying to set a read-only option! (ID=0x%x)",
-                    optionID);
-            return (M4OSA_ERR)M4ERR_READ_ONLY;
-            break;
-        /**
-         *    Maximum filesize limitation */
-        case M4WRITER_kMaxFileSize:
-            M4OSA_TRACE2_0("setting M4WRITER_kMaxFileSize option");
-            err = M4MP4W_setOption(
-                apContext->pMP4Context,M4MP4W_maxFileSize, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxFileSize)\
-                     returns error 0x%x", err);
-            }
-            break;
-
-        /**
-         *    Maximum file duration limitation */
-        case M4WRITER_kMaxFileDuration:
-            M4OSA_TRACE2_0("setting M4WRITER_kMaxFileDuration option");
-            err = M4MP4W_setOption(
-                apContext->pMP4Context,M4MP4W_maxFileDuration, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4WRITER_kMaxFileDuration)"
-                               " returns error 0x%x", err);
-            }
-            break;
-
-        /**
-         *    Set 'ftyp' atom */
-        case M4WRITER_kSetFtypBox:
-            M4OSA_TRACE2_0("setting M4WRITER_kSetFtypBox option");
-            err = M4MP4W_setOption(
-                apContext->pMP4Context, M4MP4W_setFtypBox, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_setFtypBox)\
-                     returns error 0x%x", err);
-            }
-            break;
-
-        /**
-         *    Decoder Specific Info */
-        case M4WRITER_kDSI:
-            M4OSA_TRACE2_0("setting M4WRITER_kDSI option");
-            err = M4MP4W_setOption(
-                apContext->pMP4Context, M4MP4W_DSI, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_DSI)\
-                     returns error 0x%x", err);
-            }
-            break;
-        /*+ H.264 Trimming  */
-        case M4WRITER_kMUL_PPS_SPS:
-            M4OSA_TRACE2_0("setting M4WRITER_kMUL_PPS_SPS option");
-            err = M4MP4W_setOption(
-                apContext->pMP4Context, M4MP4W_MUL_PPS_SPS, optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_DSI)\
-                     returns error 0x%x", err);
-            }
-            break;
-        /*- H.264 Trimming  */
-
-        /**
-         *    Unknown option */
-        default:
-            M4OSA_TRACE2_1("trying to set an unknown option!\
-                 (optionID=0x%x)", optionID);
-            return (M4OSA_ERR)M4ERR_BAD_OPTION_ID;
-            break;
-    }
-
-    M4OSA_TRACE3_1("M4WRITER_3GP_setOption: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_getOption(
- *     M4WRITER_Context pContext, M4OSA_UInt32 optionID,
- *     M4OSA_DataOption optionValue)
- * @brief     This function asks the writer to return the value associated with
- *            the optionID. The caller is responsible for allocating/
- *            de-allocating the memory of the value field.
- * @note      The options handled by the component depend on the implementation
- *            of the component.
- * @param     pContext:     (IN) Execution context of the 3GP writer,
- * @param     OptionId:      (IN) Id of the option to get.
- * @param     pOptionValue: (OUT) Value of the option to get.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
- * @return    M4ERR_BAD_OPTION_ID: the ID of the option is not valid.
- * @return    M4ERR_NOT_IMPLEMENTED: This option is not implemented yet.
- * @return    any error returned by the MP4 core writer getOption (Its coreID
- *            is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_getOption(
-        M4WRITER_Context pContext, M4OSA_UInt32 optionID,
-        M4OSA_DataOption optionValue)
-{
-    M4WRITER_3GP_InternalContext* apContext =
-            (M4WRITER_3GP_InternalContext*)pContext;
-
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_3("M4WRITER_3GP_getOption: pContext=0x%x, optionID=0x%x,\
-         optionValue=0x%x", pContext, optionID, optionValue);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
-         "M4WRITER_3GP_getOption: pContext is M4OSA_NULL");
-
-    switch (optionID)
-    {
-        /**
-         *    Maximum Access Unit size */
-        case M4WRITER_kMaxAUSize:
-            M4OSA_TRACE2_0("getting M4WRITER_kMaxAUSize option");
-            err = M4MP4W_getOption(apContext->pMP4Context,M4MP4W_maxAUsize,
-                (M4OSA_DataOption*)&optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_getOption(M4MP4W_maxAUsize)"
-                               " returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    Maximum chunck size */
-        case M4WRITER_kMaxChunckSize:
-            M4OSA_TRACE2_0("getting M4WRITER_kMaxChunckSize option");
-            err = M4MP4W_getOption(apContext->pMP4Context,M4MP4W_maxChunkSize,
-                (M4OSA_DataOption*)&optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_getOption(M4MP4W_maxChunkSize)\
-                     returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    The file size option */
-        case M4WRITER_kFileSize:
-            M4OSA_TRACE2_0("getting M4WRITER_kFileSize option");
-            /* get the current file size */
-            err = M4MP4W_getCurrentFileSize(
-                apContext->pMP4Context, (M4OSA_UInt32*)optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_getCurrentFileSize"
-                               " returns error 0x%x", err);
-            }
-            break;
-        /**
-         *    The file size with audio option has its own function call
-              in the MP4 core writer */
-        case M4WRITER_kFileSizeAudioEstimated:
-            M4OSA_TRACE2_0("getting M4WRITER_kFileSizeAudioEstimated option");
-            /* get the current file size ... */
-            err = M4MP4W_getCurrentFileSize(
-                apContext->pMP4Context, (M4OSA_UInt32*)optionValue);
-            if (M4OSA_ERR_IS_ERROR(err))
-            {
-                M4OSA_TRACE1_1("M4MP4W_getCurrentFileSize"
-                               " returns error 0x%x", err);
-            }
-            //no more needed 3gp writer has its own mecanism
-            ///* ... add the estimated next max AU size */
-            //*((M4OSA_UInt32*)optionValue) += apContext->maxAUsizes;
-            break;
-        /**
-         *    Unknown option */
-        default:
-            M4OSA_TRACE2_1("trying to get an unknown option!\
-                 (optionID=0x%x)", optionID);
-            return    (M4OSA_ERR)M4ERR_BAD_OPTION_ID;
-            break;
-    }
-
-    M4OSA_TRACE3_1("M4WRITER_3GP_getOption: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_startAU(
- *          M4WRITER_Context pContext, M4SYS_StreamID streamID,
- *          M4SYS_AccessUnit* pAU)
- * @brief     Prepare an Access Unit to be ready to store data
- * @note
- * @param     pContext: (IN) Execution context of the 3GP writer,
- * @param     streamID: (IN) Id of the stream to which the Access Unit
- *            is related.
- * @param     pAU:      (IN/OUT) Access Unit to be prepared.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext or pAU is M4OSA_NULL (debug only)
- * @return    M4ERR_BAD_STREAM_ID: streamID is not VideoStreamID nor
- *            AudioStreamID (debug only)
- * @return    any error returned by the MP4 core writer startAU (Its coreID
- *            is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_startAU(
-        M4WRITER_Context pContext, M4SYS_StreamID streamID,
-        M4SYS_AccessUnit* pAU)
-{
-    M4WRITER_3GP_InternalContext* apContext =
-            (M4WRITER_3GP_InternalContext*)pContext;
-
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_3("M4WRITER_3GP_startAU: pContext=0x%x, streamID=%d, pAU=0x%x",
-         pContext, streamID, pAU);
-
-    /**
-     *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext), M4ERR_PARAMETER,
-         "M4WRITER_3GP_startAU: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pAU), M4ERR_PARAMETER,
-         "M4WRITER_3GP_startAU: pAU is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(
-         ((VideoStreamID != streamID) && (AudioStreamID != streamID)),
-         M4ERR_BAD_STREAM_ID,
-         "M4WRITER_3GP_processAU: Wrong streamID");
-
-    /**
-     * Call the MP4 writer */
-    M4OSA_TRACE3_0("M4WRITER_3GP_startAU: calling M4MP4W_startAU()");
-    err = M4MP4W_startAU(apContext->pMP4Context, streamID, pAU);
-    if (M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_TRACE1_1("M4MP4W_startAU returns error 0x%x", err);
-    }
-
-    M4OSA_TRACE3_2("AU: dataAddress=0x%x, size=%d",
-         pAU->dataAddress, pAU->size);
-
-    /* Convert oversize to a request toward VES automaton */
-    if (M4WAR_MP4W_OVERSIZE == err)
-    {
-        err = M4WAR_WRITER_STOP_REQ;
-    }
-
-    M4OSA_TRACE3_1("M4WRITER_3GP_startAU: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_processAU(
- *          M4WRITER_Context pContext, M4SYS_StreamID streamID,
- *          M4SYS_AccessUnit* pAU)
- * @brief     Write an Access Unit
- * @note
- * @param     pContext: (IN) Execution context of the 3GP writer,
- * @param     streamID: (IN) Id of the stream to which the Access Unit
- *            is related.
- * @param     pAU:      (IN/OUT) Access Unit to be written
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext or pAU is M4OSA_NULL (debug only)
- * @return    M4ERR_BAD_STREAM_ID: streamID is not VideoStreamID nor
- *            AudioStreamID (debug only)
- * @return    any error returned by the MP4 core writer processAU
- *            (Its coreID is M4MP4_WRITER)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_processAU(
-        M4WRITER_Context pContext, M4SYS_StreamID streamID,
-        M4SYS_AccessUnit* pAU)
-{
-    M4WRITER_3GP_InternalContext* apContext =
-        (M4WRITER_3GP_InternalContext*)pContext;
-
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_3("M4WRITER_3GP_processAU: "
-                   "pContext=0x%x, streamID=%d, pAU=0x%x",
-                    pContext, streamID, pAU);
-
-    /**
-     *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext), M4ERR_PARAMETER,
-         "M4WRITER_3GP_processAU: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pAU), M4ERR_PARAMETER,
-         "M4WRITER_3GP_processAU: pAU is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(
-         ((VideoStreamID != streamID) && (AudioStreamID != streamID)),
-         M4ERR_BAD_STREAM_ID,
-         "M4WRITER_3GP_processAU: Wrong streamID");
-
-    M4OSA_TRACE3_4("M4WRITER_3GP_processAU: AU: "
-         "dataAddress=0x%x, size=%d, CTS=%d, nbFrag=%d",
-         pAU->dataAddress, pAU->size, (M4OSA_UInt32)pAU->CTS, pAU->nbFrag);
-
-    if(pAU->size > apContext->maxAUsizes)
-    {
-        apContext->maxAUsizes = pAU->size;
-    }
-    /**
-     * Call the MP4 writer */
-    M4OSA_TRACE3_0("M4WRITER_3GP_processAU: calling M4MP4W_processAU()");
-    err = M4MP4W_processAU(apContext->pMP4Context, streamID, pAU);
-    if (M4OSA_ERR_IS_ERROR(err))
-    {
-        M4OSA_TRACE1_1("M4MP4W_processAU returns error 0x%x", err);
-    }
-
-    /* Convert oversize to a request toward VES automaton */
-    if(M4WAR_MP4W_OVERSIZE == err)
-    {
-        err = M4WAR_WRITER_STOP_REQ;
-    }
-
-    M4OSA_TRACE3_1("M4WRITER_3GP_processAU: returning 0x%x", err);
-    return err;
-}
-
-
-/******************************************************************************
- * M4OSA_ERR M4WRITER_3GP_getInterfaces(
- *      M4WRITER_OutputFileType* Type,
- *      M4WRITER_GlobalInterface** SrcGlobalInterface,
- *      M4WRITER_DataInterface** SrcDataInterface)
- * @brief     Get the 3GPP writer common interface
- * @note      Retrieves the set of functions needed to use the 3GPP writer.
- *            It follows the common writer interface.
- * @param     Type: (OUT) return the type of this writer. Will always be
- *            M4WRITER_k3GPP.
- * @param     SrcGlobalInterface: (OUT) Main set of function to use this
- *            3GPP writer
- * @param     SrcDataInterface:   (OUT) Set of function related to datas
- *            to use this 3GPP writer
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_ALLOC: there is no more available memory
- * @return    M4ERR_PARAMETER: At least one of the parameters is M4OSA_NULL
- *            (debug only)
- ******************************************************************************
-*/
-M4OSA_ERR M4WRITER_3GP_getInterfaces(
-        M4WRITER_OutputFileType* Type,
-        M4WRITER_GlobalInterface** SrcGlobalInterface,
-        M4WRITER_DataInterface** SrcDataInterface)
-{
-    M4WRITER_GlobalInterface *pGlobal;
-    M4WRITER_DataInterface *pData;
-
-    M4OSA_TRACE2_3("M4WRITER_3GP_getInterfaces: "
-         "Type=0x%x, SrcGlobalInterface=0x%x,\
-         SrcDataInterface=0x%x", Type, SrcGlobalInterface, SrcDataInterface);
-
-    /**
-     *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Type), M4ERR_PARAMETER,
-         "M4WRITER_3GP_getInterfaces: Type is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == SrcGlobalInterface), M4ERR_PARAMETER,
-         "M4WRITER_3GP_getInterfaces: SrcGlobalInterface is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == SrcDataInterface), M4ERR_PARAMETER,
-         "M4WRITER_3GP_getInterfaces: SrcDataInterface is M4OSA_NULL");
-
-    /**
-     *    Set the output type */
-    *Type = M4WRITER_k3GPP;
-
-    /**
-     *    Allocate the global interface structure */
-    pGlobal = (M4WRITER_GlobalInterface*)M4OSA_32bitAlignedMalloc(
-                sizeof(M4WRITER_GlobalInterface),
-                M4WRITER_3GP, (M4OSA_Char *)"M4WRITER_GlobalInterface");
-    if (M4OSA_NULL == pGlobal)
-    {
-        M4OSA_TRACE1_0("unable to allocate M4WRITER_GlobalInterface,\
-             returning M4ERR_ALLOC");
-        *SrcGlobalInterface = M4OSA_NULL;
-        *SrcDataInterface = M4OSA_NULL;
-        return (M4OSA_ERR)M4ERR_ALLOC;
-    }
-
-    /**
-     *    Allocate the data interface structure */
-    pData =
-        (M4WRITER_DataInterface *)M4OSA_32bitAlignedMalloc(sizeof(M4WRITER_DataInterface),
-        M4WRITER_3GP, (M4OSA_Char *)"M4WRITER_DataInterface");
-    if (M4OSA_NULL == pData)
-    {
-        M4OSA_TRACE1_0("unable to allocate M4WRITER_DataInterface,\
-             returning M4ERR_ALLOC");
-        free(pGlobal);
-        *SrcGlobalInterface = M4OSA_NULL;
-        *SrcDataInterface = M4OSA_NULL;
-        return (M4OSA_ERR)M4ERR_ALLOC;
-    }
-
-    /**
-     *    Fill the global interface structure */
-    pGlobal->pFctOpen = M4WRITER_3GP_openWrite;
-    pGlobal->pFctAddStream = M4WRITER_3GP_addStream;
-    pGlobal->pFctStartWriting = M4WRITER_3GP_startWriting;
-    pGlobal->pFctCloseWrite = M4WRITER_3GP_closeWrite;
-    pGlobal->pFctSetOption = M4WRITER_3GP_setOption;
-    pGlobal->pFctGetOption = M4WRITER_3GP_getOption;
-
-    /**
-     *    Fill the data interface structure */
-    pData->pStartAU = M4WRITER_3GP_startAU;
-    pData->pProcessAU = M4WRITER_3GP_processAU;
-
-    /**
-     *    Set the return values */
-    *SrcGlobalInterface = pGlobal;
-    *SrcDataInterface = pData;
-
-    M4OSA_TRACE2_0("M4WRITER_3GP_getInterfaces: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c
deleted file mode 100755
index 62e2ad0..0000000
--- a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
-******************************************************************************
- * @file    M4MP4W_Utils.c
- * @brief   Utilities and private functions for the MP4 writer
-******************************************************************************
-*/
-
-#include "NXPSW_CompilerSwitches.h"
-
-#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
-
-#include "M4MP4W_Utils.h"
-#include "M4OSA_Error.h"
-#include "M4MP4W_Types.h"
-
-#define ERR_CHECK(exp, err) if (!(exp)) { return err; }
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_putByte(M4OSA_UChar c, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context)
-/*******************************************************************************/
-{
-    M4OSA_ERR err = fileFunction->writeData(context, (M4OSA_MemAddr8)&c, 1);
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_putBE16(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context)
-/*******************************************************************************/
-{
-    M4OSA_ERR err;
-    err = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
-    ERR_CHECK(err == M4NO_ERROR, err);
-    err = M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_putBE24(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context)
-/*******************************************************************************/
-{
-    M4OSA_ERR err;
-    err = M4MP4W_putByte((M4OSA_UChar)(val >> 16), fileFunction, context);
-    ERR_CHECK(err == M4NO_ERROR, err);
-    err = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
-    ERR_CHECK(err == M4NO_ERROR, err);
-    err = M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_putBE32(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
-                         M4OSA_Context context)
-/*******************************************************************************/
-{
-    M4OSA_ERR err;
-    err = M4MP4W_putByte((M4OSA_UChar)(val >> 24), fileFunction, context);
-    ERR_CHECK(err == M4NO_ERROR, err);
-    err = M4MP4W_putByte((M4OSA_UChar)(val >> 16), fileFunction, context);
-    ERR_CHECK(err == M4NO_ERROR, err);
-    err = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
-    ERR_CHECK(err == M4NO_ERROR, err);
-    err = M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_putBlock(const M4OSA_UChar* Block, M4OSA_UInt32 size,
-                           M4OSA_FileWriterPointer* fileFunction, M4OSA_Context context)
-/*******************************************************************************/
-{
-    M4OSA_ERR err = fileFunction->writeData(context, (M4OSA_MemAddr8)Block, size);
-    return err;
-}
-
-/*******************************************************************************/
-void M4MP4W_convertInt32BE(M4OSA_UInt32* valPtr)
-/*******************************************************************************/
-{
-    M4OSA_UChar a, b;
-    M4OSA_UChar* c = (M4OSA_UChar*)valPtr;
-    a       = *(c);
-    b       = *(c+1);
-    *(c)   = *(c+3);
-    *(c+1) = *(c+2);
-    *(c+2) = b;
-    *(c+3) = a;
-}
-
-/*******************************************************************************/
-void M4MP4W_table32ToBE(M4OSA_UInt32* tab, M4OSA_UInt32 nb)
-/*******************************************************************************/
-{
-    M4OSA_UInt32 i;
-    for (i=0; i<nb; i++)
-        M4MP4W_convertInt32BE(&(tab)[i]);
-}
-
-/*******************************************************************************/
-void* M4MP4W_realloc(M4OSA_MemAddr32 ptr, M4OSA_UInt32 oldSize, M4OSA_UInt32 newSize)
-/*******************************************************************************/
-{
-    M4OSA_MemAddr32 ptr2 = (M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc(newSize, M4MP4_WRITER,
-                                                          (M4OSA_Char *)"realloc");
-    if (M4OSA_NULL != ptr2)
-    {
-        memcpy((void *)ptr2, (void *)ptr, oldSize);
-    }
-    free(ptr);
-    return ptr2;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_freeContext(M4OSA_Context context)
-/*******************************************************************************/
-{
-#ifdef _M4MP4W_MOOV_FIRST
-    M4OSA_UInt32 i;
-#endif /*_M4MP4W_MOOV_FIRST*/
-    M4MP4W_Mp4FileData* mMp4FileDataPtr = (M4MP4W_Mp4FileData*)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    /*freeContext is now called after closeWrite*/
-    ERR_CHECK( mMp4FileDataPtr->state == M4MP4W_closed, M4ERR_STATE);
-    mMp4FileDataPtr->state = M4MP4W_closed;
-
-    if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)
-    {
-        /*delete also other chunks if any*/
-        /*for (i=0; i<=mMp4FileDataPtr->audioTrackPtr->currentChunk; i++)*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-        for (i=0; i<=mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk; i++)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->Chunk[i]);
-        }
-#else
-        if ((M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk) &&
-             (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk[0]))
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->Chunk[0]);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable);
-        }
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-        /*now dynamic*/
-        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->Chunk);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkSizeTable)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable);
-        }
-
-        if (mMp4FileDataPtr->audioTrackPtr->TABLE_STTS != M4OSA_NULL)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS);
-        }
-
-        if (mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ != M4OSA_NULL)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ);
-        }
-
-        if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
-        {
-            free(mMp4FileDataPtr->audioTrackPtr->DSI);
-            mMp4FileDataPtr->audioTrackPtr->DSI = M4OSA_NULL;
-        }
-
-        free(mMp4FileDataPtr->audioTrackPtr);
-        mMp4FileDataPtr->audioTrackPtr = M4OSA_NULL;
-    }
-    if (mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)
-    {
-        /*delete also other chunks if any*/
-        /*for (i=0; i<=mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-        for (i=0; i<=mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk; i++)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->Chunk[i]);
-        }
-#else
-        if ((M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk) &&
-             (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk[0]))
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->Chunk[0]);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable);
-        }
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-        /*now dynamic*/
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->Chunk);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkSizeTable)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable);
-        }
-
-        if (mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->DSI);
-            mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
-        }
-
-        /*now dynamic*/
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STTS)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ);
-        }
-        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STSS)
-        {
-            free(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS);
-        }
-
-        free(mMp4FileDataPtr->videoTrackPtr);
-        mMp4FileDataPtr->videoTrackPtr = M4OSA_NULL;
-    }
-
-    if (mMp4FileDataPtr->embeddedString != M4OSA_NULL)
-    {
-        free(mMp4FileDataPtr->embeddedString);
-        mMp4FileDataPtr->embeddedString = M4OSA_NULL;
-    }
-
-    free(mMp4FileDataPtr);
-
-    return M4NO_ERROR;
-}
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-/*******************************************************************************/
-M4OSA_Void M4MP4W_put32_Hi(M4OSA_UInt32* tab, M4OSA_UInt16 Hi)
-/*******************************************************************************/
-{
-    *tab &= 0xFFFF;
-    *tab |= Hi<<16;
-}
-
-/*******************************************************************************/
-M4OSA_Void M4MP4W_put32_Lo(M4OSA_UInt32* tab, M4OSA_UInt16 Lo)
-/*******************************************************************************/
-{
-    *tab &= 0xFFFF0000;
-    *tab |= Lo;
-}
-
-/*******************************************************************************/
-M4OSA_UInt16 M4MP4W_get32_Hi(M4OSA_UInt32* tab)
-/*******************************************************************************/
-{
-    return (*tab >> 16) & 0xFFFF;
-}
-
-/*******************************************************************************/
-M4OSA_UInt16 M4MP4W_get32_Lo(M4OSA_UInt32* tab)
-/*******************************************************************************/
-{
-    return *tab & 0xFFFF;
-}
-#endif
-
-#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
-
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
deleted file mode 100755
index cdfc441..0000000
--- a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
+++ /dev/null
@@ -1,5376 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4MP4W_Writer.c
- * @brief   Implementation of the core MP4 writer
- ******************************************************************************
- */
-
-#include "NXPSW_CompilerSwitches.h"
-
-#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
-
-#include "M4OSA_Error.h"
-#include "M4OSA_Debug.h"
-#include "M4MP4W_Writer.h"
-#include "M4MP4W_Utils.h"
-
-/* Check optimisation flags : BEGIN */
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-#ifdef _M4MP4W_MOOV_FIRST
-#error "_M4MP4W_OPTIMIZE_FOR_PHONE should not be used with _M4MP4W_MOOV_FIRST"
-
-#endif
-
-#endif
-
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
-#error "_M4MP4W_UNBUFFERED_VIDEO should be used with _M4MP4W_OPTIMIZE_FOR_PHONE"
-
-#endif
-
-#endif
-/* Check optimisation flags : END */
-
-#ifndef _M4MP4W_DONT_USE_TIME_H
-#include <time.h>
-
-#endif /*_M4MP4W_DONT_USE_TIME_H*/
-
-/*MACROS*/
-#define MAJOR_VERSION 3
-#define MINOR_VERSION 3
-#define REVISION 0
-
-#define ERR_CHECK(exp, err) if (!(exp)) { return err; }
-#define CLEANUPonERR(func) if ((err = func) != M4NO_ERROR) goto cleanup
-
-#define max(a,b) (((a) > (b)) ? (a) : (b))
-
-/***************/
-/*Static blocks*/
-/***************/
-
-/*CommonBlocks*/
-
-const M4OSA_UChar Default_ftyp [] =
-{
-    0x00, 0x00, 0x00, 0x18, 'f', 't', 'y', 'p', '3', 'g', 'p', '7', 0x00, 0x00,
-    0x03, 0x00, '3', 'g', 'p', '7', 'i', 's', 'o', 'm'
-};
-
-const M4OSA_UChar CommonBlock2 [] =
-{
-    'm', 'd', 'a', 't'
-};
-
-const M4OSA_UChar CommonBlock3 [] =
-{
-    'm', 'o', 'o', 'v', 0x00, 0x00, 0x00, 0x6C, 'm', 'v', 'h', 'd', 0x00,
-    0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock4 [] =
-{
-    0x00, 0x00, 0x03, 0xE8
-};
-
-const M4OSA_UChar CommonBlock5 [] =
-{
-    0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
-};
-
-const M4OSA_UChar CommonBlock6 [] =
-{
-    't', 'r', 'a', 'k', 0x00, 0x00, 0x00, 0x5C, 't', 'k', 'h', 'd', 0x00,
-    0x00, 0x00, 0x01
-};
-
-const M4OSA_UChar CommonBlock7 [] =
-{
-    0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock7bis [] =
-{
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x40, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock8 [] =
-{
-    'm', 'd', 'i', 'a', 0x00, 0x00, 0x00, 0x20, 'm', 'd', 'h', 'd', 0x00,
-    0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock9 [] =
-{
-    0x55, 0xC4, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock10 [] =
-{
-    'm', 'i', 'n', 'f', 0x00, 0x00, 0x00, 0x24, 'd', 'i', 'n', 'f', 0x00,
-    0x00, 0x00, 0x1C, 'd', 'r', 'e', 'f', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 'u', 'r', 'l', ' ', 0x00, 0x00, 0x00,
-    0x01
-};
-
-const M4OSA_UChar CommonBlock11 [] =
-{
-    's', 't', 'b', 'l'
-};
-
-const M4OSA_UChar CommonBlock12 [] =
-{
-    's', 't', 't', 's', 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar SampleDescriptionHeader [] =
-{
-    's', 't', 's', 'd', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
-};
-
-const M4OSA_UChar SampleDescriptionEntryStart [] =
-{
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock15 [] =
-{
-    's', 't', 's', 'z', 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock16 [] =
-{
-    's', 't', 's', 'c', 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar CommonBlock17 [] =
-{
-    's', 't', 'c', 'o', 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar BlockSignatureSkipHeader [] =
-{
-    0x00, 0x00, 0x00, 0x5E, 's', 'k', 'i', 'p'
-};
-/* due to current limitations, size must be 16 */
-const M4OSA_UChar BlockSignatureSkipDefaultEmbeddedString [] =
-{
-    'N', 'X', 'P', 'S', 'W', ' ', 'C', 'A', 'M', 'C', 'O', 'R', 'D', 'E',
-    'R', ' '
-};
-/* follows the version (like " 3.0.2"), then " -- " */
-/* due to current limitations, size must be 60 */
-const M4OSA_UChar BlockSignatureSkipDefaultIntegrationTag [] =
-{
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-/*VideoBlocks*/
-/* 320*240, now no longer hardcoded */
-/* const M4OSA_UChar VideoBlock1[] =
-    { 0x01, 0x40, 0x00, 0x00, 0x00, 0xF0, 0x00, 0x00 }; */
-const M4OSA_UChar VideoBlock1_1 [] =
-{
-    0x00, 0x00, 0x00, 0x21, 'h', 'd', 'l', 'r', 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 'v', 'i', 'd', 'e', 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar SampleDescriptionEntryVideoBoilerplate1 [] =
-{
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar SampleDescriptionEntryVideoBoilerplate2 [] =
-{
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x18, 0xFF, 0xFF
-};
-
-const M4OSA_UChar VideoBlock4 [] =
-{
-    's', 't', 's', 's', 0x00, 0x00, 0x00, 0x00
-}; /*STSS*/
-
-const M4OSA_UChar VideoBlock5 [] =
-{
-    0x00, 0x00, 0x00, 0x14, 'v', 'm', 'h', 'd', 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar VideoResolutions [] =
-{
-    0x00, 0x48, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00
-};
-
-/*Mp4vBlocks*/
-const M4OSA_UChar Mp4vBlock1 [] =
-{
-    'm', 'p', '4', 'v'
-};
-
-const M4OSA_UChar Mp4vBlock3 [] =
-{
-    0x20, 0x11
-};
-
-/*H263Blocks*/
-const M4OSA_UChar H263Block1 [] =
-{
-    's', '2', '6', '3'
-};
-
-const M4OSA_UChar H263Block2 [] =
-{
-    0x00, 0x00, 0x00, 0x0F, 'd', '2', '6', '3'
-};
-
-const M4OSA_UChar H263Block2_bitr [] =
-{
-    0x00, 0x00, 0x00, 0x1F, 'd', '2', '6', '3'
-};
-
-const M4OSA_UChar H263Block3 [] =
-{
-    'P', 'H', 'L', 'P', 0x00, 0x0A, 0x00
-};
-
-const M4OSA_UChar H263Block4 [] =
-{
-    0x00, 0x00, 0x00, 0x10, 'b', 'i', 't', 'r'
-};
-
-/*H264Blocks*/
-const M4OSA_UChar H264Block1 [] =
-{
-    'a', 'v', 'c', '1'
-};
-
-/* Store the avcC field, the version (=1),
-    the profile (=66), the compatibility (=0), */
-
-/* the level (=10),111111 + NAL field Size (= 4 - 1),
-    111 + number of PPS (=1) */
-
-const M4OSA_UChar H264Block2 [] =
-{
-        // Remove the hardcoded DSI values of H264Block2
-        'a' , 'v' , 'c' , 'C'
-};
-
-/*AMRBlocks*/
-const M4OSA_UChar AMRBlock1 [] =
-{
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar AMRBlock1_1 [] =
-{
-    0x00, 0x00, 0x00, 0x21, 'h', 'd', 'l', 'r', 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 's', 'o', 'u', 'n', 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar AudioSampleDescEntryBoilerplate [] =
-{
-    0x00, 0x02, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00
-};
-
-const M4OSA_UChar AMRDSIHeader [] =
-{
-    0x00, 0x00, 0x00, 0x11, 'd', 'a', 'm', 'r'
-};
-
-const M4OSA_UChar AMRDefaultDSI [] =
-{
-    'P', 'H', 'L', 'P', 0x00, 0x00, 0x80, 0x00, 0x01
-};
-
-const M4OSA_UChar AMRBlock4 [] =
-{
-    0x00, 0x00, 0x00, 0x10, 's', 'm', 'h', 'd', 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00
-};
-
-/*AMR8Blocks*/
-const M4OSA_UChar AMR8Block1 [] =
-{
-    's', 'a', 'm', 'r'
-};
-
-/*AMR16Blocks*/
-/*const M4OSA_UChar AMR16Block1[] = { 's', 'a', 'w', 'b'};*/
-
-/*AACBlocks*/
-const M4OSA_UChar AACBlock1 [] =
-{
-    'm', 'p', '4', 'a'
-};
-
-const M4OSA_UChar AACBlock2 [] =
-{
-    0x40, 0x15
-};
-
-/*MPEGConfigBlocks (AAC & MP4V)*/
-const M4OSA_UChar MPEGConfigBlock0 [] =
-{
-    'e', 's', 'd', 's', 0x00, 0x00, 0x00, 0x00, 0x03
-};
-
-const M4OSA_UChar MPEGConfigBlock1 [] =
-{
-    0x00, 0x00, 0x00, 0x04
-};
-
-const M4OSA_UChar MPEGConfigBlock2 [] = { 0x05 };
-const M4OSA_UChar MPEGConfigBlock3 [] =
-{
-    0x06, 0x01, 0x02
-};
-
-/*EVRCBlocks*/
-const M4OSA_UChar EVRCBlock3_1 [] =
-{
-    0x00, 0x00, 0x00, 0x0E, 'd', 'e', 'v', 'c'
-};
-
-const M4OSA_UChar EVRCBlock3_2 [] =
-{
-    'P', 'H', 'L', 'P', 0x00, 0x00
-};
-
-/*EVRC8Blocks*/
-const M4OSA_UChar EVRC8Block1 [] =
-{
-    's', 'e', 'v', 'c'
-};
-
-/***********/
-/* Methods */
-/***********/
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_getVersion(M4OSA_UInt8 *major, M4OSA_UInt8 *minor,
-                            M4OSA_UInt8 *revision )
-/*******************************************************************************/
-{
-    ERR_CHECK(M4OSA_NULL != major, M4ERR_PARAMETER);
-    ERR_CHECK(M4OSA_NULL != minor, M4ERR_PARAMETER);
-    ERR_CHECK(M4OSA_NULL != revision, M4ERR_PARAMETER);
-
-    *major = MAJOR_VERSION;
-    *minor = MINOR_VERSION;
-    *revision = REVISION;
-
-    return M4NO_ERROR;
-}
-
-static M4OSA_UInt32 M4MP4W_STTS_ALLOC_SIZE;
-static M4OSA_UInt32 M4MP4W_STSZ_ALLOC_SIZE;
-static M4OSA_UInt32 M4MP4W_STSS_ALLOC_SIZE;
-static M4OSA_UInt32 M4MP4W_CHUNK_ALLOC_NB;
-static M4OSA_UInt32 M4MP4W_STTS_AUDIO_ALLOC_SIZE;
-static M4OSA_UInt32 M4MP4W_STSZ_AUDIO_ALLOC_SIZE;
-static M4OSA_UInt32 M4MP4W_CHUNK_AUDIO_ALLOC_NB;
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-/* stsc[ ] table is splitted at 12 bits */
-#define M4MP4W_VIDEO_MAX_AU_PER_CHUNK 4095 /* 0=notused */
-
-#else
-#define M4MP4W_VIDEO_MAX_AU_PER_CHUNK 10   /* 0=notused */
-
-#endif
-
-#endif
-
-/*******************************************************************************/
-
-M4OSA_ERR M4MP4W_initializeAllocationParameters(M4MP4W_Mp4FileData *Ptr )
-/*******************************************************************************/
-{
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-    M4OSA_UInt32 maxMemory, vesMemory;
-    M4OSA_UInt32 nbVideoFrames, nbAudioFrames;
-    M4OSA_UInt32 averageVideoChunk;
-
-    /*-----------*/
-    /* NB_FRAMES */
-    /*-----------*/
-
-    /* magical formula : memory = vesMemory + 12 * framerate * duration */
-
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-
-    vesMemory = 0x32000; /* 200 kB */
-
-#else
-
-    vesMemory = 0x3E800; /* 250 kB */
-
-#endif
-
-#define VIDEO_POOL_MEMORY 1000000
-
-    maxMemory = VIDEO_POOL_MEMORY;
-
-    if (maxMemory < vesMemory) {
-        return M4ERR_ALLOC;
-    }
-
-    nbVideoFrames = ( maxMemory - vesMemory) / 12;
-
-    M4OSA_TRACE1_1("M4MP4W: %d images max", nbVideoFrames);
-
-    /* VIDEO */
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-    /* assume an average of 25 fpc : reference = 15 fps * 2s * 0.8 */
-
-    averageVideoChunk = 2500;
-
-#else
-
-    if (M4MP4W_VIDEO_MAX_AU_PER_CHUNK > 0)
-    {
-        averageVideoChunk = 100 * M4MP4W_VIDEO_MAX_AU_PER_CHUNK - 20
-            * (M4MP4W_VIDEO_MAX_AU_PER_CHUNK - 1); /* margin 20% */
-    }
-    else
-    {
-        /* assume an average of 50 fpc */
-        averageVideoChunk = 5000;
-    }
-
-#endif
-
-    M4MP4W_STTS_ALLOC_SIZE = nbVideoFrames * sizeof(M4OSA_UInt32);
-    M4MP4W_STSZ_ALLOC_SIZE = nbVideoFrames * sizeof(M4OSA_UInt16);
-    M4MP4W_STSS_ALLOC_SIZE = nbVideoFrames * sizeof(
-        M4OSA_UInt32); /* very conservative (all images are intra) */
-
-    M4MP4W_CHUNK_ALLOC_NB = ( nbVideoFrames * 100) / averageVideoChunk + 1;
-
-    /* AUDIO */
-
-    nbAudioFrames = nbVideoFrames;
-    /* audio is 5 fps, which is the smallest framerate for video */
-
-    M4MP4W_STTS_AUDIO_ALLOC_SIZE = 100; /* compressed */
-    M4MP4W_STSZ_AUDIO_ALLOC_SIZE = 100; /* compressed */
-
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-
-    M4MP4W_CHUNK_AUDIO_ALLOC_NB = nbAudioFrames / 10 + 1;
-
-#else
-
-    M4MP4W_CHUNK_AUDIO_ALLOC_NB = nbAudioFrames / 38 + 1;
-
-#endif
-
-    return M4NO_ERROR;
-
-#else
-
-    /* VIDEO 5 min at 25 fps null-enc */
-
-    M4MP4W_STTS_ALLOC_SIZE = 20000;
-    M4MP4W_STSZ_ALLOC_SIZE = 18000;
-    M4MP4W_STSS_ALLOC_SIZE = 5000;
-    M4MP4W_CHUNK_ALLOC_NB = 500;
-
-    /* AUDIO 2 min aac+ null-enc */
-
-    M4MP4W_STTS_AUDIO_ALLOC_SIZE = 32000;
-    M4MP4W_STSZ_AUDIO_ALLOC_SIZE = 20000;
-    M4MP4W_CHUNK_AUDIO_ALLOC_NB = 1000;
-
-    return M4NO_ERROR;
-
-#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_openWrite(M4OSA_Context *contextPtr,
-                           void *outputFileDescriptor,
-                           M4OSA_FileWriterPointer *fileWriterFunction,
-                           void *tempFileDescriptor,
-                           M4OSA_FileReadPointer *fileReaderFunction )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = M4OSA_NULL;
-
-    ERR_CHECK(M4OSA_NULL != contextPtr, M4ERR_PARAMETER);
-    ERR_CHECK(M4OSA_NULL != outputFileDescriptor, M4ERR_PARAMETER);
-    ERR_CHECK(M4OSA_NULL != fileWriterFunction, M4ERR_PARAMETER);
-#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
-    /* Optional, feature won't be used if NULL */
-
-    M4OSA_TRACE2_1("tempFileDescriptor = %p", tempFileDescriptor);
-
-    if (M4OSA_NULL == tempFileDescriptor)
-    {
-        M4OSA_TRACE1_0(
-            "tempFileDescriptor is NULL, RESERVED_MOOV_DISK_SPACE feature not used");
-    }
-
-#else /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-    /* Not used : ERR_CHECK(M4OSA_NULL != tempFileDescriptor, M4ERR_PARAMETER); */
-#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-    /* Not used : ERR_CHECK(M4OSA_NULL != fileReaderFunction, M4ERR_PARAMETER); */
-
-    /* The context reuse mode was suppressed*/
-
-    mMp4FileDataPtr =
-        (M4MP4W_Mp4FileData *)M4OSA_32bitAlignedMalloc(sizeof(M4MP4W_Mp4FileData),
-        M4MP4_WRITER, (M4OSA_Char *)"MP4 writer context");
-    ERR_CHECK(mMp4FileDataPtr != M4OSA_NULL, M4ERR_ALLOC);
-    mMp4FileDataPtr->url = outputFileDescriptor;
-    mMp4FileDataPtr->audioTrackPtr = M4OSA_NULL;
-    mMp4FileDataPtr->videoTrackPtr = M4OSA_NULL;
-    mMp4FileDataPtr->MaxChunkSize = M4MP4W_DefaultMaxChunkSize; /*default  */
-    mMp4FileDataPtr->MaxAUSize = M4MP4W_DefaultMaxAuSize;       /*default  */
-    mMp4FileDataPtr->InterleaveDur =
-        M4MP4W_DefaultInterleaveDur; /*default = 0, i.e. not used*/
-    mMp4FileDataPtr->MaxFileSize = 0; /*default = 0, i.e. not used*/
-    mMp4FileDataPtr->camcoderVersion = 0; /*default is " 0.0.0"*/
-    mMp4FileDataPtr->embeddedString =
-        M4OSA_NULL; /*default is in BlockSignatureSkipDefaultEmbeddedString */
-    mMp4FileDataPtr->integrationTag = M4OSA_NULL; /*default is 0 */
-    mMp4FileDataPtr->MaxFileDuration = 0; /*default = 0, i.e. not used*/
-
-    mMp4FileDataPtr->fileWriterFunctions = fileWriterFunction;
-    mMp4FileDataPtr->hasAudio = M4OSA_FALSE;
-    mMp4FileDataPtr->hasVideo = M4OSA_FALSE;
-    mMp4FileDataPtr->state = M4MP4W_opened;
-    mMp4FileDataPtr->duration = 0; /*i*/
-    /*patch for integrationTag 174 -> 238 (+64)*/
-    mMp4FileDataPtr->filesize =
-        238; /*initialization with constant part in ftyp+mdat+moov+skip*/
-
-    mMp4FileDataPtr->estimateAudioSize = M4OSA_FALSE;
-    mMp4FileDataPtr->audioMsChunkDur =
-        0; /*set and used only when estimateAudioSize is true*/
-    mMp4FileDataPtr->audioMsStopTime =
-        0; /*set and used only when estimateAudioSize is true*/
-
-    mMp4FileDataPtr->fileWriterContext = M4OSA_NULL;
-    /* + CRLV6775 -H.264 trimming */
-    mMp4FileDataPtr->bMULPPSSPS = M4OSA_FALSE;
-    /* - CRLV6775 -H.264 trimming */
-
-#ifndef _M4MP4W_MOOV_FIRST
-
-    mMp4FileDataPtr->absoluteCurrentPos =
-        32; /*init with ftyp + beginning of mdat size*/
-
-#endif
-
-#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
-
-    mMp4FileDataPtr->safetyFileUrl = tempFileDescriptor;
-    mMp4FileDataPtr->cleanSafetyFile =
-        M4OSA_FALSE; /* No need to clean it just yet. */
-
-#endif               /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-
-    /* ftyp atom */
-
-    memset((void *) &mMp4FileDataPtr->ftyp,0,
-        sizeof(mMp4FileDataPtr->ftyp));
-
-    *contextPtr = mMp4FileDataPtr;
-
-    M4MP4W_initializeAllocationParameters(mMp4FileDataPtr);
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_addStream(M4OSA_Context context,
-                           M4SYS_StreamDescription *streamDescPtr )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-
-    ERR_CHECK(M4OSA_NULL != context, M4ERR_PARAMETER);
-
-    ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
-        || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
-    mMp4FileDataPtr->state = M4MP4W_ready;
-
-    switch (streamDescPtr->streamType)
-    {
-        case M4SYS_kAMR:
-        case M4SYS_kAAC:
-        case M4SYS_kEVRC:
-            /*Audio*/
-            ERR_CHECK(streamDescPtr->streamID == AudioStreamID,
-                M4ERR_PARAMETER);
-
-            /*check if an audio track has already been added*/
-            ERR_CHECK(mMp4FileDataPtr->hasAudio == M4OSA_FALSE,
-                M4ERR_BAD_CONTEXT);
-
-            /*check if alloc need to be done*/
-            if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)
-            {
-                mMp4FileDataPtr->audioTrackPtr = (M4MP4W_AudioTrackData
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4MP4W_AudioTrackData),
-                    M4MP4_WRITER, (M4OSA_Char *)"M4MP4W_AudioTrackData");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL,
-                    M4ERR_ALLOC);
-
-                /**
-                * We must init these pointers in case an alloc bellow fails */
-                mMp4FileDataPtr->audioTrackPtr->Chunk = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->chunkSizeTable = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ = M4OSA_NULL;
-                mMp4FileDataPtr->audioTrackPtr->DSI = M4OSA_NULL;
-
-                /*now dynamic*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-                mMp4FileDataPtr->audioTrackPtr->Chunk =
-                    (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
-                    * sizeof(M4OSA_UChar *),
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
-                    M4ERR_ALLOC);
-
-#else
-
-                mMp4FileDataPtr->audioTrackPtr->Chunk =
-                    (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UChar *),
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
-                    M4ERR_ALLOC);
-                mMp4FileDataPtr->audioTrackPtr->Chunk[0] = M4OSA_NULL;
-
-                mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkOffsetTable");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STTS_AUDIO_ALLOC_SIZE,
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->TABLE_STTS");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks = 1;
-
-                mMp4FileDataPtr->audioTrackPtr->chunkSizeTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkSizeTable");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkSampleNbTable");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkTimeMsTable");
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-
-                mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk = 0;
-            }
-            mMp4FileDataPtr->hasAudio = M4OSA_TRUE;
-            mMp4FileDataPtr->filesize += 402;
-            mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
-                mMp4FileDataPtr->MaxChunkSize; /* init value */
-            mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
-                mMp4FileDataPtr->MaxAUSize;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS = 0;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb = 0;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize = 0;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb = 1;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.timescale =
-                streamDescPtr->timeScale;
-            mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[0] = 0;     /*init*/
-            mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[0] = 0; /*init*/
-            mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable[0] = 0;   /*init*/
-            mMp4FileDataPtr->audioTrackPtr->currentChunk =
-                0; /*1st chunk is Chunk[0]*/
-            mMp4FileDataPtr->audioTrackPtr->currentPos = 0;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            mMp4FileDataPtr->audioTrackPtr->currentStsc = 0;
-
-#endif
-
-            mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_ready;
-            mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks = 0;
-            mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ = M4OSA_NULL;
-
-            mMp4FileDataPtr->audioTrackPtr->avgBitrate =
-                streamDescPtr->averageBitrate;
-            mMp4FileDataPtr->audioTrackPtr->maxBitrate =
-                streamDescPtr->maxBitrate;
-
-            if (streamDescPtr->streamType == M4SYS_kAMR)
-            {
-
-                mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
-                    M4SYS_kAMR;
-                ERR_CHECK(streamDescPtr->timeScale == 8000, M4ERR_PARAMETER);
-                mMp4FileDataPtr->audioTrackPtr->sampleDuration =
-                    160; /*AMR8+timescale=8000 => sample duration 160 constant*/
-
-                /*Use given DSI if passed, else use default value*/
-                if (streamDescPtr->decoderSpecificInfoSize != 0)
-                {
-                    /*amr DSI is 9 bytes long !*/
-                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
-                        9; /*always 9 for amr*/
-                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 9,
-                        M4ERR_PARAMETER);
-                    mMp4FileDataPtr->audioTrackPtr->DSI =
-                        (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(9, M4MP4_WRITER,
-                        (M4OSA_Char *)"audioTrackPtr->DSI");
-                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
-                        M4ERR_ALLOC);
-                    memcpy(
-                        (void *)mMp4FileDataPtr->audioTrackPtr->DSI,
-                        (void *)streamDescPtr->decoderSpecificInfo,
-                        9);
-                }
-                else
-                {
-                    mMp4FileDataPtr->audioTrackPtr->DSI =
-                        M4OSA_NULL; /*default static block will be used*/
-                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
-                        0; /*but the actual static dsi is 9 bytes !*/
-                }
-            }
-            else if (streamDescPtr->streamType == M4SYS_kEVRC)
-            {
-
-                mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
-                    M4SYS_kEVRC;
-                ERR_CHECK(streamDescPtr->timeScale == 8000, M4ERR_PARAMETER);
-                mMp4FileDataPtr->audioTrackPtr->sampleDuration =
-                    160; /*EVRC+timescale=8000 => sample duration 160 constant*/
-
-                /*Use given DSI if passed, else use default value*/
-                if (streamDescPtr->decoderSpecificInfoSize != 0)
-                {
-                    /*evrc DSI is 6 bytes long !*/
-                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
-                        6; /*always 6 for evrc*/
-                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 6,
-                        M4ERR_PARAMETER);
-                    mMp4FileDataPtr->audioTrackPtr->DSI =
-                        (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(6, M4MP4_WRITER,
-                        (M4OSA_Char *)"audioTrackPtr->DSI");
-                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
-                        M4ERR_ALLOC);
-                    memcpy(
-                        (void *)mMp4FileDataPtr->audioTrackPtr->DSI,
-                        (void *)streamDescPtr->decoderSpecificInfo,
-                        6);
-                }
-                else
-                {
-                    mMp4FileDataPtr->audioTrackPtr->DSI =
-                        M4OSA_NULL; /*default static block will be used*/
-                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
-                        0; /*but the actual static dsi is 6 bytes !*/
-                }
-            }
-            else /*M4SYS_kAAC*/
-            {
-                /*avg bitrate should be set*/
-                ERR_CHECK(streamDescPtr->averageBitrate != -1, M4ERR_PARAMETER);
-                ERR_CHECK(streamDescPtr->maxBitrate != -1, M4ERR_PARAMETER);
-
-                mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
-                    M4SYS_kAAC;
-                mMp4FileDataPtr->audioTrackPtr->sampleDuration =
-                    0; /*don't know for aac, so set 0*/
-
-                mMp4FileDataPtr->audioTrackPtr->dsiSize =
-                    (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
-
-                if (mMp4FileDataPtr->audioTrackPtr->dsiSize != 0)
-                {
-                    mMp4FileDataPtr->audioTrackPtr->DSI =
-                        (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
-                        streamDescPtr->decoderSpecificInfoSize,
-                        M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->DSI");
-                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
-                        M4ERR_ALLOC);
-                    memcpy(
-                        (void *)mMp4FileDataPtr->audioTrackPtr->DSI,
-                        (void *)streamDescPtr->decoderSpecificInfo,
-                        streamDescPtr->decoderSpecificInfoSize);
-                }
-                else
-                {
-                    /*no dsi: return bad parameter ?*/
-                    return M4ERR_PARAMETER;
-                }
-            }
-
-            break;
-
-        case (M4SYS_kMPEG_4):
-        case (M4SYS_kH264):
-        case (M4SYS_kH263):
-            /*Video*/
-            ERR_CHECK(streamDescPtr->streamID == VideoStreamID,
-                M4ERR_PARAMETER);
-
-            /*check if a video track has already been added*/
-            ERR_CHECK(mMp4FileDataPtr->hasVideo == M4OSA_FALSE,
-                M4ERR_BAD_CONTEXT);
-
-            /*check if alloc need to be done*/
-            if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)
-            {
-                mMp4FileDataPtr->videoTrackPtr = (M4MP4W_VideoTrackData
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4MP4W_VideoTrackData),
-                    M4MP4_WRITER, (M4OSA_Char *)"M4MP4W_VideoTrackData");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL,
-                    M4ERR_ALLOC);
-
-                /**
-                * We must init these pointers in case an alloc bellow fails */
-                mMp4FileDataPtr->videoTrackPtr->Chunk = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->chunkSizeTable = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STSS = M4OSA_NULL;
-                mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
-
-                /*now dynamic*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-                mMp4FileDataPtr->videoTrackPtr->Chunk =
-                    (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
-                    * sizeof(M4OSA_UChar *),
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
-                    M4ERR_ALLOC);
-
-#else
-                /*re-use the same chunk and flush it when full*/
-
-                mMp4FileDataPtr->videoTrackPtr->Chunk =
-                    (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UChar *),
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
-                    M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->Chunk[0] = M4OSA_NULL;
-
-                mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkOffsetTable");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
-                    M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->chunkSizeTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkSizeTable");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
-                    * sizeof(M4OSA_UInt32),
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkSampleNbTable");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable =
-                    (M4MP4W_Time32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
-                    * sizeof(M4MP4W_Time32),
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkTimeMsTable");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable
-                    != M4OSA_NULL, M4ERR_ALLOC);
-
-                mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk = 0;
-                /*tables are now dynamic*/
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STTS_ALLOC_SIZE,
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STTS");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks = 1;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
-                    (M4OSA_UInt16 *)M4OSA_32bitAlignedMalloc(M4MP4W_STSZ_ALLOC_SIZE,
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSZ");
-
-#else
-
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STSZ_ALLOC_SIZE,
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSZ");
-
-#endif
-
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks = 1;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STSS =
-                    (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STSS_ALLOC_SIZE,
-                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSS");
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS
-                    != M4OSA_NULL, M4ERR_ALLOC);
-                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks = 1;
-            }
-            mMp4FileDataPtr->hasVideo = M4OSA_TRUE;
-            mMp4FileDataPtr->filesize += 462;
-            mMp4FileDataPtr->videoTrackPtr->width = M4MP4W_DefaultWidth;
-            mMp4FileDataPtr->videoTrackPtr->height = M4MP4W_DefaultHeight;
-            mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
-                mMp4FileDataPtr->MaxAUSize;
-            mMp4FileDataPtr->videoTrackPtr->CommonData.trackType =
-                streamDescPtr->streamType;
-            mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
-                mMp4FileDataPtr->MaxChunkSize; /* init value */
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk =
-                M4MP4W_VIDEO_MAX_AU_PER_CHUNK;
-
-#endif
-
-            ERR_CHECK(streamDescPtr->timeScale == 1000, M4ERR_PARAMETER);
-            mMp4FileDataPtr->videoTrackPtr->CommonData.timescale = 1000;
-            mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS = 0;
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb = 0;
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleSize = 0;
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb = 1;
-            mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[0] = 0;     /*init*/
-            mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[0] = 0; /*init*/
-            mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable[0] = 0;   /*init*/
-            mMp4FileDataPtr->videoTrackPtr->currentChunk =
-                0; /*1st chunk is Chunk[0]*/
-            mMp4FileDataPtr->videoTrackPtr->currentPos = 0;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            mMp4FileDataPtr->videoTrackPtr->currentStsc = 0;
-
-#endif
-
-            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb = 0;
-            mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_ready;
-
-            if (streamDescPtr->streamType == M4SYS_kH263)
-            {
-                if (( streamDescPtr->averageBitrate == -1)
-                    || (streamDescPtr->maxBitrate == -1))
-                {
-                    /*the bitrate will not be written if the bitrate information
-                     is not fully set */
-                    mMp4FileDataPtr->videoTrackPtr->avgBitrate = -1;
-                    mMp4FileDataPtr->videoTrackPtr->maxBitrate = -1;
-                }
-                else
-                {
-                    /*proprietary storage of h263 bitrate.
-                     Warning: not the actual bitrate (bit set to 1).*/
-                    mMp4FileDataPtr->videoTrackPtr->avgBitrate =
-                        streamDescPtr->averageBitrate;
-                    mMp4FileDataPtr->videoTrackPtr->maxBitrate =
-                        streamDescPtr->maxBitrate;
-                }
-
-                if (( 0 != streamDescPtr->decoderSpecificInfoSize)
-                    && (M4OSA_NULL != streamDescPtr->decoderSpecificInfo))
-                {
-                    /*decoder specific info size is supposed to be always 7 bytes long */
-                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 7,
-                        M4ERR_PARAMETER);
-                    mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                        (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
-                    mMp4FileDataPtr->videoTrackPtr->DSI =
-                        (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
-                        streamDescPtr->decoderSpecificInfoSize,
-                        M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                    ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL,
-                        M4ERR_ALLOC);
-                    memcpy(
-                        (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
-                        (void *)streamDescPtr->decoderSpecificInfo,
-                        streamDescPtr->decoderSpecificInfoSize);
-                }
-                else
-                {
-                    /*use the default dsi*/
-                    mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
-                    mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
-                }
-            }
-
-            if (streamDescPtr->streamType == M4SYS_kMPEG_4)
-            {
-                mMp4FileDataPtr->filesize += 22; /*extra bytes (from h263)*/
-                /* allow DSI to be M4OSA_NULL, in which case the actual DSI will be
-                 set by setOption. */
-                if (( 0 == streamDescPtr->decoderSpecificInfoSize)
-                    || (M4OSA_NULL == streamDescPtr->decoderSpecificInfo))
-                {
-                    mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
-                    mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
-                }
-                else
-                {
-                    /*MP4V specific*/
-                    /*decoder specific info size is supposed to be always <
-                        105 so that ESD size can be coded with 1 byte*/
-                    /*(this should not be restrictive because dsi is always shorter !)*/
-                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize < 105,
-                        M4ERR_PARAMETER);
-                    mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                        (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
-                    mMp4FileDataPtr->videoTrackPtr->DSI =
-                        (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
-                        streamDescPtr->decoderSpecificInfoSize,
-                        M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                    ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL,
-                        M4ERR_ALLOC);
-                    memcpy(
-                        (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
-                        (void *)streamDescPtr->decoderSpecificInfo,
-                        streamDescPtr->decoderSpecificInfoSize);
-                    mMp4FileDataPtr->filesize +=
-                        streamDescPtr->decoderSpecificInfoSize;
-                }
-                /*avg bitrate should be set*/
-                ERR_CHECK(streamDescPtr->averageBitrate != -1, M4ERR_PARAMETER);
-                mMp4FileDataPtr->videoTrackPtr->avgBitrate =
-                    streamDescPtr->averageBitrate;
-                mMp4FileDataPtr->videoTrackPtr->maxBitrate =
-                    streamDescPtr->averageBitrate;
-            }
-
-            if (streamDescPtr->streamType == M4SYS_kH264)
-            {
-                /* H264 specific information */
-                mMp4FileDataPtr->videoTrackPtr->avgBitrate =
-                    streamDescPtr->averageBitrate;
-                mMp4FileDataPtr->videoTrackPtr->maxBitrate =
-                    streamDescPtr->maxBitrate;
-
-                if ((0 != streamDescPtr->decoderSpecificInfoSize)
-                    && (M4OSA_NULL != streamDescPtr->decoderSpecificInfo))
-                {
-                    /* + H.264 trimming */
-                    if (M4OSA_TRUE == mMp4FileDataPtr->bMULPPSSPS)
-                    {
-                        M4OSA_UInt16 SPSLength, PPSLength;
-                        M4OSA_UInt16 *DSI;
-                        /* Store the DSI size */
-                        mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                            (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize
-                            - 24;
-
-                        /* Copy the DSI (SPS + PPS) */
-                        mMp4FileDataPtr->videoTrackPtr->DSI =
-                            (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
-                            streamDescPtr->decoderSpecificInfoSize,
-                            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
-                            != M4OSA_NULL, M4ERR_ALLOC);
-
-                        DSI =
-                            (M4OSA_UInt16 *)streamDescPtr->decoderSpecificInfo;
-                        SPSLength = DSI[6];
-                        PPSLength = DSI[10];
-                        memcpy(
-                            (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
-                            (void *)((streamDescPtr->
-                            decoderSpecificInfo)+12), 2);
-                        memcpy(
-                            (void *)((mMp4FileDataPtr->videoTrackPtr->
-                            DSI)+2), (void *)((streamDescPtr->
-                            decoderSpecificInfo)+28), SPSLength);
-
-                        memcpy(
-                            (void *)((mMp4FileDataPtr->videoTrackPtr->
-                            DSI)+2 + SPSLength),
-                            (void *)((streamDescPtr->
-                            decoderSpecificInfo)+20), 2);
-                        memcpy(
-                            (void *)((mMp4FileDataPtr->videoTrackPtr->
-                            DSI)+4 + SPSLength),
-                            (void *)((streamDescPtr->
-                            decoderSpecificInfo)+28 + SPSLength),
-                            PPSLength);
-                        /* - H.264 trimming */
-                    }
-                    else
-                    {
-                        /* Store the DSI size */
-                        mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                            (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
-
-                        /* Copy the DSI (SPS + PPS) */
-                        mMp4FileDataPtr->videoTrackPtr->DSI =
-                            (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
-                            streamDescPtr->decoderSpecificInfoSize,
-                            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
-                            != M4OSA_NULL, M4ERR_ALLOC);
-                        memcpy(
-                            (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
-                            (void *)streamDescPtr->
-                            decoderSpecificInfo,
-                            streamDescPtr->decoderSpecificInfoSize);
-                    }
-                }
-                else
-                {
-                    /*use the default dsi*/
-                    mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
-                    mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
-                }
-            }
-            break;
-
-        default:
-            err = M4ERR_PARAMETER;
-    }
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_startWriting( M4OSA_Context context )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 fileModeAccess = M4OSA_kFileWrite | M4OSA_kFileCreate;
-    M4OSA_UInt32 i;
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
-    mMp4FileDataPtr->state = M4MP4W_writing;
-
-    /*audio microstate */
-    /*    if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)*/
-    if (mMp4FileDataPtr->hasAudio)
-    {
-        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState == M4MP4W_ready),
-            M4ERR_STATE);
-        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing;
-
-        /* First audio chunk allocation */
-        mMp4FileDataPtr->audioTrackPtr->Chunk[0] = (M4OSA_UChar
-            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
-            M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk[0]");
-        ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk[0] != M4OSA_NULL,
-            M4ERR_ALLOC);
-    }
-
-    /*video microstate*/
-    /*    if (mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)*/
-    if (mMp4FileDataPtr->hasVideo)
-    {
-        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState == M4MP4W_ready),
-            M4ERR_STATE);
-        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing;
-
-        /* First video chunk allocation */
-        mMp4FileDataPtr->videoTrackPtr->Chunk[0] = (M4OSA_UChar
-            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
-            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk[0]");
-        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk[0] != M4OSA_NULL,
-            M4ERR_ALLOC);
-    }
-
-    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
-    {
-        /*set audioMsChunkDur (duration in ms before a new chunk is created)
-         for audio size estimation*/
-        ERR_CHECK(mMp4FileDataPtr->hasVideo, M4ERR_BAD_CONTEXT);
-        ERR_CHECK(mMp4FileDataPtr->hasAudio, M4ERR_BAD_CONTEXT);
-
-        mMp4FileDataPtr->audioMsChunkDur =
-            20 * mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
-            / mMp4FileDataPtr->audioTrackPtr->MaxAUSize;
-
-        if (( mMp4FileDataPtr->InterleaveDur != 0)
-            && (mMp4FileDataPtr->InterleaveDur
-            < 20 *mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
-            / mMp4FileDataPtr->audioTrackPtr->MaxAUSize))
-        {
-            mMp4FileDataPtr->audioMsChunkDur = mMp4FileDataPtr->InterleaveDur;
-        }
-    }
-
-#ifndef _M4MP4W_MOOV_FIRST
-
-    /*open file in write binary mode*/
-
-    err = mMp4FileDataPtr->fileWriterFunctions->openWrite(
-        &mMp4FileDataPtr->fileWriterContext,
-        mMp4FileDataPtr->url, fileModeAccess);
-    ERR_CHECK((M4NO_ERROR == err), err);
-
-    /*ftyp atom*/
-    if (mMp4FileDataPtr->ftyp.major_brand != 0)
-    {
-        /* Put customized ftyp box */
-        err =
-            M4MP4W_putBE32(16 + (mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4),
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext);
-        ERR_CHECK((M4NO_ERROR == err), err);
-        err = M4MP4W_putBE32(M4MPAC_FTYP_TAG,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext);
-        ERR_CHECK((M4NO_ERROR == err), err);
-        err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.major_brand,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext);
-        ERR_CHECK((M4NO_ERROR == err), err);
-        err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.minor_version,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext);
-        ERR_CHECK((M4NO_ERROR == err), err);
-
-        for ( i = 0; i < mMp4FileDataPtr->ftyp.nbCompatibleBrands; i++ )
-        {
-            err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.compatible_brands[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                mMp4FileDataPtr->fileWriterContext);
-            ERR_CHECK((M4NO_ERROR == err), err);
-        }
-    }
-    else
-    {
-        /* Put default ftyp box */
-        err = M4MP4W_putBlock(Default_ftyp, sizeof(Default_ftyp),
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext);
-        ERR_CHECK((M4NO_ERROR == err), err);
-    }
-
-    /*init mdat value with 0 but the right value is set just before the file is closed*/
-    err = M4MP4W_putBE32(0, mMp4FileDataPtr->fileWriterFunctions,
-        mMp4FileDataPtr->fileWriterContext);
-    ERR_CHECK((M4NO_ERROR == err), err);
-    err = M4MP4W_putBlock(CommonBlock2, sizeof(CommonBlock2),
-        mMp4FileDataPtr->fileWriterFunctions,
-        mMp4FileDataPtr->fileWriterContext);
-    ERR_CHECK((M4NO_ERROR == err), err);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
-
-    if (0 != mMp4FileDataPtr->MaxFileSize
-        && M4OSA_NULL != mMp4FileDataPtr->safetyFileUrl)
-    {
-        M4OSA_ERR err2 = M4NO_ERROR;
-        M4OSA_Context safetyFileContext = M4OSA_NULL;
-        M4OSA_UInt32 safetyFileSize = 0, addendum = 0;
-        M4OSA_UChar dummyData[100]; /* To fill the safety file with */
-
-        err =
-            mMp4FileDataPtr->fileWriterFunctions->openWrite(&safetyFileContext,
-            mMp4FileDataPtr->safetyFileUrl, fileModeAccess);
-        ERR_CHECK((M4NO_ERROR == err), err);
-
-        mMp4FileDataPtr->cleanSafetyFile = M4OSA_TRUE;
-
-        /* 10% seems to be a reasonable worst case, but also provision for 1kb of moov overhead.*/
-        safetyFileSize = 1000 + (mMp4FileDataPtr->MaxFileSize * 10 + 99) / 100;
-
-        /* Here we add space to take into account the fact we have to flush any pending
-        chunk in closeWrite, this space is the sum of the maximum chunk sizes, for each
-        track. */
-
-#ifndef _M4MP4W_UNBUFFERED_VIDEO
-
-        if (mMp4FileDataPtr->hasVideo)
-        {
-            safetyFileSize += mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
-        }
-
-#endif
-
-        if (mMp4FileDataPtr->hasAudio)
-        {
-            safetyFileSize += mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
-        }
-
-        memset((void *)dummyData, 0xCA,sizeof(dummyData)); /* For extra safety. */
-
-        for ( i = 0;
-            i < (safetyFileSize + sizeof(dummyData) - 1) / sizeof(dummyData);
-            i++ )
-        {
-            err = mMp4FileDataPtr->fileWriterFunctions->writeData(
-                safetyFileContext, dummyData, sizeof(dummyData));
-
-            if (M4NO_ERROR != err)
-                break;
-            /* Don't return from the function yet, as we need to close the file first. */
-        }
-
-        /* I don't need to keep it open. */
-        err2 =
-            mMp4FileDataPtr->fileWriterFunctions->closeWrite(safetyFileContext);
-
-        if (M4NO_ERROR != err)
-        {
-            return err;
-        }
-        else
-            ERR_CHECK((M4NO_ERROR == err2), err2);
-
-        M4OSA_TRACE1_0("Safety file correctly created");
-    }
-#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_newAudioChunk( M4OSA_Context context,
-                               M4OSA_UInt32 *leftSpaceInChunk )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    M4OSA_Double scale_audio;
-
-#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-    M4OSA_UInt32 reallocNb;
-
-#endif
-
-    /* video only */
-
-    if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)
-        return M4NO_ERROR;
-
-    M4OSA_TRACE1_0(" M4MP4W_newAudioChunk - flush audio");
-    M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
-        mMp4FileDataPtr->audioTrackPtr->currentChunk,
-        mMp4FileDataPtr->absoluteCurrentPos);
-
-    scale_audio = 1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
-
-#ifndef _M4MP4W_MOOV_FIRST
-    /*flush chunk*/
-
-    err = M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[0],
-        mMp4FileDataPtr->audioTrackPtr->currentPos,
-        mMp4FileDataPtr->fileWriterFunctions,
-        mMp4FileDataPtr->fileWriterContext);
-
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos;
-        M4OSA_TRACE2_1(
-            "M4MP4W_newAudioChunk: putBlock error when flushing chunk: %#X",
-            err);
-        /* Ouch, we got an error writing to the file, but we need to properly react so that the
-         state is still consistent and we can properly close the file so that what has been
-         recorded so far is not lost. Yay error recovery! */
-
-        /* First, we do not know where we are in the file. Put us back at where we were before
-        attempting to write the data. That way, we're consistent with the chunk state data. */
-        err = mMp4FileDataPtr->fileWriterFunctions->seek(
-            mMp4FileDataPtr->fileWriterContext,
-            M4OSA_kFileSeekBeginning, &temp);
-
-        M4OSA_TRACE2_3(
-            "Backtracking to position 0x%08X, seek returned %d and position %08X",
-            mMp4FileDataPtr->absoluteCurrentPos, err, temp);
-
-        /* Then, do not update any info whatsoever in the writing state. This will have the
-         consequence that it will be as if the chunk has not been flushed yet, and therefore
-         it will be done as part of closeWrite (where there could be room to do so,
-         if some emergency room is freed for that purpose). */
-
-        /* And lastly (for here), return that we've reached the limit of available space. */
-
-        return M4WAR_MP4W_OVERSIZE;
-    }
-
-    /*update chunk offset*/
-    mMp4FileDataPtr->audioTrackPtr->
-        chunkOffsetTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-        mMp4FileDataPtr->absoluteCurrentPos;
-
-    /*add chunk size to absoluteCurrentPos*/
-    mMp4FileDataPtr->absoluteCurrentPos +=
-        mMp4FileDataPtr->audioTrackPtr->currentPos;
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    /*update chunk info */
-
-    mMp4FileDataPtr->audioTrackPtr->
-        chunkSizeTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-        mMp4FileDataPtr->audioTrackPtr->currentPos;
-    mMp4FileDataPtr->audioTrackPtr->
-        chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS;
-
-    mMp4FileDataPtr->audioTrackPtr->currentChunk += 1;
-    /*if audio amount of data is not estimated*/
-    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
-        mMp4FileDataPtr->filesize += 16;
-
-    /*alloc new chunk*/
-    /*only if not already allocated*/
-    if (mMp4FileDataPtr->audioTrackPtr->currentChunk
-            > mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk)
-    {
-        /*update LastAllocatedChunk ( -> = currentChunk)*/
-        mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk += 1;
-
-        /*max nb of chunk is now dynamic*/
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        if (mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk
-            + 3 > M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-        {
-            M4OSA_TRACE1_0("M4MP4W_newAudioChunk : audio chunk table is full");
-            return M4WAR_MP4W_OVERSIZE;
-        }
-
-#else
-
-        if (((mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk)
-            % M4MP4W_CHUNK_AUDIO_ALLOC_NB) == 0)
-        {
-            reallocNb = mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk
-                + M4MP4W_CHUNK_AUDIO_ALLOC_NB;
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-            mMp4FileDataPtr->audioTrackPtr->Chunk =
-                (M4OSA_UChar ** )M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->Chunk,
-                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-                * sizeof(M4OSA_UChar *),
-                reallocNb * sizeof(M4OSA_UChar *));
-            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
-                M4ERR_ALLOC);
-
-#else
-
-            mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
-                chunkOffsetTable,
-                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-                * sizeof(M4OSA_UInt32),
-                reallocNb * sizeof(M4OSA_UInt32));
-            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-            mMp4FileDataPtr->audioTrackPtr->chunkSizeTable =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
-                chunkSizeTable,
-                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-                * sizeof(M4OSA_UInt32),
-                reallocNb * sizeof(M4OSA_UInt32));
-            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-
-            mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
-                chunkSampleNbTable,
-                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-                * sizeof(M4OSA_UInt32),
-                reallocNb * sizeof(M4OSA_UInt32));
-            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-
-            mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable =
-                (M4MP4W_Time32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
-                chunkTimeMsTable,
-                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-                * sizeof(M4MP4W_Time32),
-                reallocNb * sizeof(M4MP4W_Time32));
-            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-        }
-#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-        mMp4FileDataPtr->audioTrackPtr->
-            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk] = (M4OSA_UChar
-            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
-            M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->currentChunk");
-        ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->
-            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk]
-        != M4OSA_NULL, M4ERR_ALLOC);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    }
-
-    /*update leftSpaceInChunk, currentPos and currentChunkDur*/
-    *leftSpaceInChunk = mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
-    mMp4FileDataPtr->audioTrackPtr->currentPos = 0;
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-    /* check wether to use a new stsc or not */
-
-    if (mMp4FileDataPtr->audioTrackPtr->currentStsc > 0)
-    {
-        if (( mMp4FileDataPtr->audioTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->
-            currentStsc] & 0xFFF) != (mMp4FileDataPtr->audioTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc
-            - 1] & 0xFFF))
-            mMp4FileDataPtr->audioTrackPtr->currentStsc += 1;
-    }
-    else
-        mMp4FileDataPtr->audioTrackPtr->currentStsc += 1;
-
-    /* max nb of chunk is now dynamic */
-    if (mMp4FileDataPtr->audioTrackPtr->currentStsc
-        + 3 > M4MP4W_CHUNK_AUDIO_ALLOC_NB)
-    {
-        M4OSA_TRACE1_0("M4MP4W_newAudioChunk : audio stsc table is full");
-        return M4WAR_MP4W_OVERSIZE;
-    }
-
-    /* set nb of samples in the new chunk to 0 */
-    mMp4FileDataPtr->audioTrackPtr->
-        chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc] =
-        0 + (mMp4FileDataPtr->audioTrackPtr->currentChunk << 12);
-
-#else
-    /*set nb of samples in the new chunk to 0*/
-
-    mMp4FileDataPtr->audioTrackPtr->
-        chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] = 0;
-
-#endif
-
-    /*set time of the new chunk to lastCTS (for initialization, but updated further to the
-    CTS of the last sample in the chunk)*/
-
-    mMp4FileDataPtr->audioTrackPtr->
-        chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-        (M4OSA_UInt32)(mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
-        * scale_audio);
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_newVideoChunk( M4OSA_Context context,
-                               M4OSA_UInt32 *leftSpaceInChunk )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    M4OSA_Double scale_video;
-
-#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-    M4OSA_UInt32 reallocNb;
-
-#endif
-
-    /* audio only */
-
-    if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)
-        return M4NO_ERROR;
-
-    M4OSA_TRACE1_0("M4MP4W_newVideoChunk - flush video");
-    M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
-        mMp4FileDataPtr->videoTrackPtr->currentChunk,
-        mMp4FileDataPtr->absoluteCurrentPos);
-
-    scale_video = 1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
-
-#ifndef _M4MP4W_MOOV_FIRST
-
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-    /* samples are already written to file */
-#else
-    /*flush chunk*/
-
-    err = M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[0],
-        mMp4FileDataPtr->videoTrackPtr->currentPos,
-        mMp4FileDataPtr->fileWriterFunctions,
-        mMp4FileDataPtr->fileWriterContext);
-
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos;
-        M4OSA_TRACE2_1(
-            "M4MP4W_newVideoChunk: putBlock error when flushing chunk: %#X",
-            err);
-        /* Ouch, we got an error writing to the file, but we need to properly react so that the
-         state is still consistent and we can properly close the file so that what has been
-         recorded so far is not lost. Yay error recovery! */
-
-        /* First, we do not know where we are in the file. Put us back at where we were before
-        attempting to write the data. That way, we're consistent with the chunk state data. */
-        err = mMp4FileDataPtr->fileWriterFunctions->seek(
-            mMp4FileDataPtr->fileWriterContext,
-            M4OSA_kFileSeekBeginning, &temp);
-
-        M4OSA_TRACE2_3(
-            "Backtracking to position 0x%08X, seek returned %d and position %08X",
-            mMp4FileDataPtr->absoluteCurrentPos, err, temp);
-        /* Then, do not update any info whatsoever in the writing state. This will have the
-         consequence that it will be as if the chunk has not been flushed yet, and therefore it
-         will be done as part of closeWrite (where there could be room to do so, if some
-         emergency room is freed for that purpose). */
-
-        /* And lastly (for here), return that we've reached the limit of available space.
-         We don't care about the error originally returned by putBlock. */
-
-        return M4WAR_MP4W_OVERSIZE;
-    }
-
-#endif
-
-    /*update chunk offset*/
-
-    mMp4FileDataPtr->videoTrackPtr->
-        chunkOffsetTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-        mMp4FileDataPtr->absoluteCurrentPos;
-
-    /*add chunk size to absoluteCurrentPos*/
-    mMp4FileDataPtr->absoluteCurrentPos +=
-        mMp4FileDataPtr->videoTrackPtr->currentPos;
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    /*update chunk info before to go for a new one*/
-
-    mMp4FileDataPtr->videoTrackPtr->
-        chunkSizeTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-        mMp4FileDataPtr->videoTrackPtr->currentPos;
-    mMp4FileDataPtr->videoTrackPtr->
-        chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-        (M4OSA_UInt32)(mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
-        * scale_video);
-
-    mMp4FileDataPtr->videoTrackPtr->currentChunk += 1;
-    mMp4FileDataPtr->filesize += 16;
-
-    /*alloc new chunk*/
-    /*only if not already allocated*/
-    if (mMp4FileDataPtr->videoTrackPtr->currentChunk
-        > mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk)
-    {
-        /*update LastAllocatedChunk ( -> = currentChunk)*/
-        mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk += 1;
-
-        /*max nb of chunk is now dynamic*/
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        if ( mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk
-            + 3 > M4MP4W_CHUNK_ALLOC_NB)
-        {
-            M4OSA_TRACE1_0("M4MP4W_newVideoChunk : video chunk table is full");
-            return M4WAR_MP4W_OVERSIZE;
-        }
-
-#else
-
-        if (((mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk)
-            % M4MP4W_CHUNK_ALLOC_NB) == 0)
-        {
-            reallocNb = mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk
-                + M4MP4W_CHUNK_ALLOC_NB;
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-            mMp4FileDataPtr->videoTrackPtr->Chunk =
-                (M4OSA_UChar ** )M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->Chunk,
-                ( reallocNb
-                - M4MP4W_CHUNK_ALLOC_NB) * sizeof(M4OSA_UChar *),
-                reallocNb * sizeof(M4OSA_UChar *));
-            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
-                M4ERR_ALLOC);
-
-#else
-
-            mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
-                chunkOffsetTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
-                * sizeof(M4OSA_UInt32),
-                reallocNb * sizeof(M4OSA_UInt32));
-            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-            mMp4FileDataPtr->videoTrackPtr->chunkSizeTable =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
-                chunkSizeTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
-                * sizeof(M4OSA_UInt32),
-                reallocNb * sizeof(M4OSA_UInt32));
-            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-
-            mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
-                chunkSampleNbTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
-                * sizeof(M4OSA_UInt32),
-                reallocNb * sizeof(M4OSA_UInt32));
-            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-
-            mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable =
-                (M4MP4W_Time32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
-                chunkTimeMsTable, ( reallocNb
-                - M4MP4W_CHUNK_ALLOC_NB) * sizeof(M4MP4W_Time32),
-                reallocNb * sizeof(M4MP4W_Time32));
-            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable
-                != M4OSA_NULL, M4ERR_ALLOC);
-        }
-#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-        mMp4FileDataPtr->videoTrackPtr->
-            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk] = (M4OSA_UChar
-            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
-            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->MaxChunkSize");
-        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->
-            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
-        != M4OSA_NULL, M4ERR_ALLOC);
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    }
-
-    /*update leftSpaceInChunk, currentPos and currentChunkDur*/
-    *leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
-    mMp4FileDataPtr->videoTrackPtr->currentPos = 0;
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-    /* check wether to use a new stsc or not */
-
-    if (mMp4FileDataPtr->videoTrackPtr->currentStsc > 0)
-    {
-        if ((mMp4FileDataPtr->videoTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
-            currentStsc] & 0xFFF) != (mMp4FileDataPtr->videoTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc
-            - 1] & 0xFFF))
-            mMp4FileDataPtr->videoTrackPtr->currentStsc += 1;
-    }
-    else
-        mMp4FileDataPtr->videoTrackPtr->currentStsc += 1;
-
-    /* max nb of chunk is now dynamic */
-    if (mMp4FileDataPtr->videoTrackPtr->currentStsc
-        + 3 > M4MP4W_CHUNK_ALLOC_NB)
-    {
-        M4OSA_TRACE1_0("M4MP4W_newVideoChunk : video stsc table is full");
-        return M4WAR_MP4W_OVERSIZE;
-    }
-
-    /* set nb of samples in the new chunk to 0 */
-    mMp4FileDataPtr->videoTrackPtr->
-        chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc] =
-        0 + (mMp4FileDataPtr->videoTrackPtr->currentChunk << 12);
-
-#else
-    /*set nb of samples in the new chunk to 0*/
-
-    mMp4FileDataPtr->videoTrackPtr->
-        chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] = 0;
-
-#endif
-
-    /*set time of the new chunk to lastCTS (for initialization, but updated further to the
-    CTS of the last sample in the chunk)*/
-
-    mMp4FileDataPtr->videoTrackPtr->
-        chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-        (M4OSA_UInt32)(mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
-        * scale_video);
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_startAU( M4OSA_Context context, M4SYS_StreamID streamID,
-                         M4SYS_AccessUnit *auPtr )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-
-    M4OSA_UInt32 leftSpaceInChunk;
-    M4MP4W_Time32 chunkDurMs;
-
-    M4OSA_Double scale_audio;
-    M4OSA_Double scale_video;
-
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-    ERR_CHECK(auPtr != M4OSA_NULL, M4ERR_PARAMETER);
-
-    M4OSA_TRACE2_0("----- M4MP4W_startAU -----");
-
-    /*check macro state*/
-    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_writing), M4ERR_STATE);
-
-    if (streamID == AudioStreamID) /*audio stream*/
-    {
-        M4OSA_TRACE2_0("M4MP4W_startAU -> audio");
-
-        scale_audio =
-            1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
-
-        /*audio microstate*/
-        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState
-            == M4MP4W_writing), M4ERR_STATE);
-        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing_startAU;
-
-        leftSpaceInChunk = mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
-            - mMp4FileDataPtr->audioTrackPtr->currentPos;
-
-        M4OSA_TRACE2_2("audio %d  %d",
-            mMp4FileDataPtr->audioTrackPtr->currentPos, leftSpaceInChunk);
-
-        chunkDurMs =
-            (M4OSA_UInt32)(( mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
-            * scale_audio) - mMp4FileDataPtr->audioTrackPtr->
-            chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->
-            currentChunk]);
-
-        if ((leftSpaceInChunk < mMp4FileDataPtr->audioTrackPtr->MaxAUSize)
-            || (( mMp4FileDataPtr->InterleaveDur != 0)
-            && (chunkDurMs >= mMp4FileDataPtr->InterleaveDur)))
-        {
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-            /* only if there is at least 1 video sample in the chunk */
-
-            if ((mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)
-                && (mMp4FileDataPtr->videoTrackPtr->currentPos > 0))
-            {
-                /* close the opened video chunk before creating a new audio one */
-                err = M4MP4W_newVideoChunk(context, &leftSpaceInChunk);
-
-                if (err != M4NO_ERROR)
-                    return err;
-            }
-
-#endif
-            /* not enough space in current chunk: create a new one */
-
-            err = M4MP4W_newAudioChunk(context, &leftSpaceInChunk);
-
-            if (err != M4NO_ERROR)
-                return err;
-        }
-
-        auPtr->size = leftSpaceInChunk;
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-        auPtr->dataAddress = (M4OSA_MemAddr32)(mMp4FileDataPtr->audioTrackPtr->
-            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk]
-        + mMp4FileDataPtr->audioTrackPtr->currentPos);
-
-#else
-
-        auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->audioTrackPtr->Chunk[0]
-        + mMp4FileDataPtr->audioTrackPtr->currentPos);
-
-#endif                                   /*_M4MP4W_MOOV_FIRST*/
-
-    }
-    else if (streamID == VideoStreamID) /*video stream*/
-    {
-        M4OSA_TRACE2_0("M4MP4W_startAU -> video");
-
-        scale_video =
-            1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
-
-        /*video microstate*/
-        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState
-            == M4MP4W_writing), M4ERR_STATE);
-        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing_startAU;
-
-        leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize
-            - mMp4FileDataPtr->videoTrackPtr->currentPos;
-
-        chunkDurMs =
-            (M4OSA_UInt32)(( mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
-            * scale_video) - mMp4FileDataPtr->videoTrackPtr->
-            chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->
-            currentChunk]);
-
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-
-        leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
-
-#endif
-
-        M4OSA_TRACE2_2("video %d  %d",
-            mMp4FileDataPtr->videoTrackPtr->currentPos, leftSpaceInChunk);
-
-        if (( leftSpaceInChunk < mMp4FileDataPtr->videoTrackPtr->MaxAUSize)
-            || (( mMp4FileDataPtr->InterleaveDur != 0)
-            && (chunkDurMs >= mMp4FileDataPtr->InterleaveDur))
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            || (( mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk != 0)
-            && (( mMp4FileDataPtr->videoTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
-            currentStsc] & 0xFFF)
-            == mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk))
-
-#endif
-
-            )
-        {
-            /*not enough space in current chunk: create a new one*/
-            err = M4MP4W_newVideoChunk(context, &leftSpaceInChunk);
-
-            if (err != M4NO_ERROR)
-                return err;
-        }
-
-        M4OSA_TRACE2_3("startAU: size 0x%x pos 0x%x chunk %u", auPtr->size,
-            mMp4FileDataPtr->videoTrackPtr->currentPos,
-            mMp4FileDataPtr->videoTrackPtr->currentChunk);
-
-        M4OSA_TRACE3_1("adr = 0x%p", auPtr->dataAddress);
-
-        if (auPtr->dataAddress)
-        {
-            M4OSA_TRACE3_3(" data = %08X %08X %08X", auPtr->dataAddress[0],
-                auPtr->dataAddress[1], auPtr->dataAddress[2]);
-        }
-
-        auPtr->size = leftSpaceInChunk;
-#ifdef _M4MP4W_MOOV_FIRST
-
-        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kH264)
-            auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->
-            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
-        + mMp4FileDataPtr->videoTrackPtr->currentPos + 4);
-        else
-            auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->
-            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
-        + mMp4FileDataPtr->videoTrackPtr->currentPos);
-
-#else
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-
-        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kH264)
-            auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0] + 4);
-        else
-            auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]);
-
-#else
-
-        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kH264)
-            auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]
-        + mMp4FileDataPtr->videoTrackPtr->currentPos
-            + 4); /* In H264, we must start by the length of the NALU, coded in 4 bytes */
-        else
-            auPtr->dataAddress =
-            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]
-        + mMp4FileDataPtr->videoTrackPtr->currentPos);
-
-#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    }
-    else
-        return M4ERR_BAD_STREAM_ID;
-
-    M4OSA_TRACE1_3("M4MPW_startAU: start address:%p, size:%lu, stream:%d",
-        auPtr->dataAddress, auPtr->size, streamID);
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_processAU( M4OSA_Context context, M4SYS_StreamID streamID,
-                           M4SYS_AccessUnit *auPtr )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4MP4W_Time32 delta;
-    M4MP4W_Time32 lastSampleDur;
-    M4OSA_UInt32 i;
-    /*expectedSize is the max filesize to forecast when adding a new AU:*/
-    M4OSA_UInt32 expectedSize =
-        32; /*initialized with an estimation of the max metadata space needed for an AU.*/
-    M4OSA_Double scale_audio = 0.0;
-    M4OSA_Double scale_video = 0.0;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    /*check macro state*/
-    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_writing), M4ERR_STATE);
-
-    M4OSA_TRACE2_0("M4MP4W_processAU");
-
-    if (streamID == AudioStreamID)
-        scale_audio =
-        1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
-
-    if (streamID == VideoStreamID)
-        scale_video =
-        1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
-
-    /* PL 27/10/2008: after the resurgence of the AAC 128 bug, I added a debug check that
-     the encoded data didn't overflow the available space in the AU */
-
-    switch( streamID )
-    {
-        case AudioStreamID:
-            M4OSA_DEBUG_IF1(auPtr->size
-                + mMp4FileDataPtr->audioTrackPtr->currentPos
-            > mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
-            M4ERR_CONTEXT_FAILED,
-            "Uh oh. Buffer overflow in the writer. Abandon ship!");
-            M4OSA_DEBUG_IF2(auPtr->size
-                > mMp4FileDataPtr->audioTrackPtr->MaxAUSize,
-                M4ERR_CONTEXT_FAILED,
-                "Oops. An AU went over the declared Max AU size.\
-                 You might wish to investigate that.");
-            break;
-
-        case VideoStreamID:
-            M4OSA_DEBUG_IF1(auPtr->size
-                + mMp4FileDataPtr->videoTrackPtr->currentPos
-                    > mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
-                    M4ERR_CONTEXT_FAILED,
-                    "Uh oh. Buffer overflow in the writer. Abandon ship!");
-            M4OSA_DEBUG_IF2(auPtr->size
-                    > mMp4FileDataPtr->videoTrackPtr->MaxAUSize,
-                    M4ERR_CONTEXT_FAILED,
-                    "Oops. An AU went over the declared Max AU size.\
-                     You might wish to investigate that.");
-            break;
-    }
-
-    /*only if not in the case audio with estimateAudioSize
-    (else, size already estimated at this point)*/
-    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
-        || (streamID == VideoStreamID))
-    {
-        /*check filesize if needed*/
-        if (mMp4FileDataPtr->MaxFileSize != 0)
-        {
-            expectedSize += mMp4FileDataPtr->filesize + auPtr->size;
-
-            if ((streamID == VideoStreamID)
-                && (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-                == M4SYS_kH264))
-            {
-                expectedSize += 4;
-            }
-
-            if (expectedSize > mMp4FileDataPtr->MaxFileSize)
-            {
-                M4OSA_TRACE1_0("processAU : !! FILESIZE EXCEEDED !!");
-
-                /* patch for autostop is MaxFileSize exceeded */
-                M4OSA_TRACE1_0("M4MP4W_processAU : stop at targeted filesize");
-                return M4WAR_MP4W_OVERSIZE;
-            }
-        }
-    }
-
-    /*case audioMsStopTime has already been set during video processing,
-     and now check it for audio*/
-    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
-        && (streamID == AudioStreamID))
-    {
-        if (mMp4FileDataPtr->audioMsStopTime <= (auPtr->CTS *scale_audio))
-        {
-            /* bugfix: if a new chunk was just created, cancel it before to close */
-            if ((mMp4FileDataPtr->audioTrackPtr->currentChunk != 0)
-                && (mMp4FileDataPtr->audioTrackPtr->currentPos == 0))
-            {
-                mMp4FileDataPtr->audioTrackPtr->currentChunk--;
-            }
-            M4OSA_TRACE1_0("M4MP4W_processAU : audio stop time reached");
-            return M4WAR_MP4W_OVERSIZE;
-        }
-    }
-
-    if (streamID == AudioStreamID) /*audio stream*/
-    {
-        M4OSA_TRACE2_0("M4MP4W_processAU -> audio");
-
-        /*audio microstate*/
-        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState
-            == M4MP4W_writing_startAU), M4ERR_STATE);
-        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing;
-
-        mMp4FileDataPtr->audioTrackPtr->currentPos += auPtr->size;
-        /* Warning: time conversion cast 64to32! */
-        delta = (M4MP4W_Time32)auPtr->CTS
-            - mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS;
-
-        /* DEBUG stts entries which are equal to 0 */
-        M4OSA_TRACE2_1("A_DELTA = %ld\n", delta);
-
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
-            == 0) /*test if first AU*/
-        {
-            /*set au size*/
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize = auPtr->size;
-
-            /*sample duration is a priori constant in audio case, */
-            /*but if an Au at least has different size, a stsz table will be created */
-
-            /*mMp4FileDataPtr->audioTrackPtr->sampleDuration = delta; */
-            /*TODO test sample duration? (should be 20ms in AMR8, 160 tics with timescale 8000) */
-        }
-        else
-        {
-            /*check if au size is constant (audio) */
-            /*0 sample size means non constant size*/
-            if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize != 0)
-            {
-                if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize
-                    != auPtr->size)
-                {
-                    /*first AU with different size => non constant size => STSZ table needed*/
-                    /*computation of the nb of block of size M4MP4W_STSZ_ALLOC_SIZE to allocate*/
-                    mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks =
-                        1 + mMp4FileDataPtr->audioTrackPtr->
-                        CommonData.sampleNb
-                        * 4 / M4MP4W_STSZ_AUDIO_ALLOC_SIZE;
-                    mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ =
-                        (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(
-                        mMp4FileDataPtr->audioTrackPtr->
-                        nbOfAllocatedStszBlocks
-                        * M4MP4W_STSZ_AUDIO_ALLOC_SIZE,
-                        M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->TABLE_STSZ");
-                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ
-                        != M4OSA_NULL, M4ERR_ALLOC);
-
-                    for ( i = 0;
-                        i < mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-                        i++ )
-                    {
-                        mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ[i] =
-                            mMp4FileDataPtr->audioTrackPtr->
-                            CommonData.sampleSize;
-                    }
-                    mMp4FileDataPtr->audioTrackPtr->
-                        TABLE_STSZ[mMp4FileDataPtr->audioTrackPtr->
-                        CommonData.sampleNb] = auPtr->size;
-                    mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize =
-                        0; /*used as a flag in that case*/
-                    /*more bytes in the file in that case:*/
-                    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
-                        mMp4FileDataPtr->filesize +=
-                        4 * mMp4FileDataPtr->audioTrackPtr->
-                        CommonData.sampleNb;
-                }
-            }
-            /*else table already exists*/
-            else
-            {
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-                if (4 *(mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb + 3)
-                    >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks
-                    *M4MP4W_STSZ_AUDIO_ALLOC_SIZE)
-                {
-                    M4OSA_TRACE1_0(
-                        "M4MP4W_processAU : audio stsz table is full");
-                    return M4WAR_MP4W_OVERSIZE;
-                }
-
-#else
-
-                if (4 *mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
-                    >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks
-                    *M4MP4W_STSZ_AUDIO_ALLOC_SIZE)
-                {
-                    mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks +=
-                        1;
-                    mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ =
-                        (M4OSA_UInt32 *)M4MP4W_realloc(
-                        (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
-                        TABLE_STSZ, ( mMp4FileDataPtr->audioTrackPtr->
-                        nbOfAllocatedStszBlocks - 1)
-                        * M4MP4W_STSZ_AUDIO_ALLOC_SIZE,
-                        mMp4FileDataPtr->audioTrackPtr->
-                        nbOfAllocatedStszBlocks
-                        * M4MP4W_STSZ_AUDIO_ALLOC_SIZE);
-                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ
-                        != M4OSA_NULL, M4ERR_ALLOC);
-                }
-
-#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-                mMp4FileDataPtr->audioTrackPtr->
-                    TABLE_STSZ[mMp4FileDataPtr->audioTrackPtr->
-                    CommonData.sampleNb] = auPtr->size;
-
-                if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
-                    mMp4FileDataPtr->filesize += 4;
-            }
-        }
-
-        if (delta > mMp4FileDataPtr->audioTrackPtr->sampleDuration)
-        {
-            /* keep track of real sample duration*/
-            mMp4FileDataPtr->audioTrackPtr->sampleDuration = delta;
-        }
-
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
-            == 0) /*test if first AU*/
-        {
-            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[0] = 1;
-            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[1] = 0;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb = 1;
-            mMp4FileDataPtr->filesize += 8;
-        }
-        else if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
-            == 1) /*test if second AU*/
-        {
-#ifndef DUPLICATE_STTS_IN_LAST_AU
-
-            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[0] += 1;
-
-#endif /*DUPLICATE_STTS_IN_LAST_AU*/
-
-            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[1] = delta;
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb += 1;
-            mMp4FileDataPtr->filesize += 8;
-        }
-        else
-        {
-            /*retrieve last sample delta*/
-            lastSampleDur = mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
-                * (mMp4FileDataPtr->audioTrackPtr->
-                CommonData.sttsTableEntryNb - 1) - 1];
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            if (8 *(mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
-                + 3) >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks
-                *M4MP4W_STTS_AUDIO_ALLOC_SIZE)
-            {
-                M4OSA_TRACE1_0("M4MP4W_processAU : audio stts table is full");
-                return M4WAR_MP4W_OVERSIZE;
-            }
-
-#else
-
-            if (8 *mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
-                >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks
-                *M4MP4W_STTS_AUDIO_ALLOC_SIZE)
-            {
-                mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks += 1;
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS =
-                    (M4OSA_UInt32 *)M4MP4W_realloc(
-                    (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
-                    TABLE_STTS, ( mMp4FileDataPtr->audioTrackPtr->
-                    nbOfAllocatedSttsBlocks
-                    - 1) * M4MP4W_STTS_AUDIO_ALLOC_SIZE,
-                    mMp4FileDataPtr->audioTrackPtr->
-                    nbOfAllocatedSttsBlocks
-                    * M4MP4W_STTS_AUDIO_ALLOC_SIZE);
-                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS
-                    != M4OSA_NULL, M4ERR_ALLOC);
-            }
-
-#endif                                   /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-            if (delta != lastSampleDur) /*new entry in the table*/
-            {
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
-                    mMp4FileDataPtr->audioTrackPtr->
-                    CommonData.sttsTableEntryNb - 1)] = 1;
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
-                    mMp4FileDataPtr->audioTrackPtr->
-                    CommonData.sttsTableEntryNb - 1) + 1] = delta;
-                mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb +=
-                    1;
-                mMp4FileDataPtr->filesize += 8;
-            }
-            else
-            {
-                /*increase of 1 the number of consecutive AUs with same duration*/
-                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
-                    mMp4FileDataPtr->audioTrackPtr->
-                    CommonData.sttsTableEntryNb - 1) - 2] += 1;
-            }
-        }
-        mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb += 1;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        mMp4FileDataPtr->audioTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc] +=
-            1;
-
-#else
-
-        mMp4FileDataPtr->audioTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] +=
-            1;
-
-#endif
-        /* Warning: time conversion cast 64to32! */
-
-        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS =
-            (M4MP4W_Time32)auPtr->CTS;
-    }
-    else if (streamID == VideoStreamID) /*video stream*/
-    {
-        M4OSA_TRACE2_0("M4MP4W_processAU -> video");
-
-        /* In h264, the size of the AU must be added to the data */
-        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kH264)
-        {
-            /* Add the size of the NALU in BE */
-            M4OSA_MemAddr8 pTmpDataAddress = M4OSA_NULL;
-            auPtr->dataAddress -= 1;
-            pTmpDataAddress = (M4OSA_MemAddr8)auPtr->dataAddress;
-
-            // bit manipulation
-            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 24) & 0x000000FF);
-            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 16) & 0x000000FF);
-            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 8)  & 0x000000FF);
-            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size)       & 0x000000FF);
-
-            auPtr->size += 4;
-        }
-
-        /*video microstate*/
-        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState
-            == M4MP4W_writing_startAU), M4ERR_STATE);
-        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing;
-
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-        /* samples are written to file now */
-
-        err = M4MP4W_putBlock((M4OSA_UChar *)auPtr->dataAddress, auPtr->size,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext);
-
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos
-                + mMp4FileDataPtr->videoTrackPtr->currentPos;
-            M4OSA_TRACE2_1(
-                "M4MP4W_processAU: putBlock error when writing unbuffered video sample: %#X",
-                err);
-            /* Ouch, we got an error writing to the file, but we need to properly react so that
-             the state is still consistent and we can properly close the file so that what has
-              been recorded so far is not lost. Yay error recovery! */
-
-            /* First, we do not know where we are in the file. Put us back at where we were before
-            attempting to write the data. That way, we're consistent with the chunk and sample
-             state data.absoluteCurrentPos is only updated for chunks, it points to the beginning
-             of the chunk,therefore we need to add videoTrackPtr->currentPos to know where we
-             were in the file. */
-            err = mMp4FileDataPtr->fileWriterFunctions->seek(
-                mMp4FileDataPtr->fileWriterContext,
-                M4OSA_kFileSeekBeginning, &temp);
-
-            M4OSA_TRACE2_3(
-                "Backtracking to position 0x%08X, seek returned %d and position %08X",
-                mMp4FileDataPtr->absoluteCurrentPos
-                + mMp4FileDataPtr->videoTrackPtr->currentPos, err, temp);
-
-            /* Then, do not update any info whatsoever in the writing state. This will have the
-             consequence that it will be as if the sample has never been written, so the chunk
-             will be merely closed after the previous sample (the sample we attempted to write
-             here is lost). */
-
-            /* And lastly (for here), return that we've reached the limit of available space.
-             We don't care about the error originally returned by putBlock. */
-
-            return M4WAR_MP4W_OVERSIZE;
-        }
-
-#endif
-
-        if ((M4MP4W_Time32)auPtr->CTS < mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS) {
-            // Do not report as error, it will abort the entire filewrite. Just skip this frame.
-            M4OSA_TRACE1_0("Skip frame. Video frame has too old timestamp.");
-            return M4NO_ERROR;
-        }
-
-        mMp4FileDataPtr->videoTrackPtr->currentPos += auPtr->size;
-
-        /* Warning: time conversion cast 64to32! */
-        delta = (M4MP4W_Time32)auPtr->CTS
-            - mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS;
-
-        /* DEBUG stts entries which are equal to 0 */
-        M4OSA_TRACE2_1("V_DELTA = %ld\n", delta);
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        if (2 *(mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb + 3)
-            >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
-            *M4MP4W_STSZ_ALLOC_SIZE)
-        {
-            M4OSA_TRACE1_0("M4MP4W_processAU : video stsz table is full");
-            return M4WAR_MP4W_OVERSIZE;
-        }
-
-        mMp4FileDataPtr->videoTrackPtr->
-            TABLE_STSZ[mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb] =
-            (M4OSA_UInt16)auPtr->size;
-        mMp4FileDataPtr->filesize += 4;
-
-#else
-
-        if (4 *mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
-            >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
-            *M4MP4W_STSZ_ALLOC_SIZE)
-        {
-            mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks += 1;
-
-            mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
-                (M4OSA_UInt32 *)M4MP4W_realloc(
-                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
-                ( mMp4FileDataPtr->videoTrackPtr->
-                nbOfAllocatedStszBlocks
-                - 1) * M4MP4W_STSZ_ALLOC_SIZE,
-                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
-                * M4MP4W_STSZ_ALLOC_SIZE);
-
-            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ != M4OSA_NULL,
-                M4ERR_ALLOC);
-        }
-
-        mMp4FileDataPtr->videoTrackPtr->
-            TABLE_STSZ[mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb] =
-            auPtr->size;
-        mMp4FileDataPtr->filesize += 4;
-
-#endif
-
-        if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
-            == 0) /*test if first AU*/
-        {
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0], 1);
-            M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0], 0);
-
-#else
-
-            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0] = 1;
-            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = 0;
-
-#endif
-
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb = 1;
-            mMp4FileDataPtr->filesize += 8;
-        }
-        else if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
-            == 1 ) /*test if second AU*/
-        {
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0],
-                (M4OSA_UInt16)delta);
-
-#else
-
-            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = delta;
-
-#endif
-
-        }
-        else
-        {
-            /*retrieve last sample delta*/
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            lastSampleDur = M4MP4W_get32_Hi(&mMp4FileDataPtr->videoTrackPtr->
-                TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                CommonData.sttsTableEntryNb - 1]);
-
-            if (4 *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
-                + 3) >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks
-                *M4MP4W_STTS_ALLOC_SIZE)
-            {
-                M4OSA_TRACE1_0("M4MP4W_processAU : video stts table is full");
-                return M4WAR_MP4W_OVERSIZE;
-            }
-
-#else
-
-            lastSampleDur = mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
-                * (mMp4FileDataPtr->videoTrackPtr->
-                CommonData.sttsTableEntryNb - 1) + 1];
-
-            if (8 *mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
-                >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks
-                *M4MP4W_STTS_ALLOC_SIZE)
-            {
-                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks += 1;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS =
-                    (M4OSA_UInt32 *)M4MP4W_realloc(
-                    (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STTS, ( mMp4FileDataPtr->videoTrackPtr->
-                    nbOfAllocatedSttsBlocks
-                    - 1) * M4MP4W_STTS_ALLOC_SIZE,
-                    mMp4FileDataPtr->videoTrackPtr->
-                    nbOfAllocatedSttsBlocks
-                    * M4MP4W_STTS_ALLOC_SIZE);
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS
-                    != M4OSA_NULL, M4ERR_ALLOC);
-            }
-
-#endif                                   /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-            if (delta != lastSampleDur) /*new entry in the table*/
-            {
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-                M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb], 1);
-                M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb], (M4OSA_UInt16)delta);
-
-#else
-
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
-                    mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb)] = 1;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
-                    *(mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb)+1] = delta;
-
-#endif
-
-                mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb +=
-                    1;
-                mMp4FileDataPtr->filesize += 8;
-            }
-            else
-            {
-                /*increase of 1 the number of consecutive AUs with same duration*/
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-                mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb - 1] += 1;
-
-#else
-
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
-                    mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb - 1)] += 1;
-
-#endif
-
-            }
-        }
-
-        mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb += 1;
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        mMp4FileDataPtr->videoTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc] +=
-            1;
-
-#else
-
-        mMp4FileDataPtr->videoTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] +=
-            1;
-
-#endif
-
-        if (auPtr->attribute == AU_RAP)
-        {
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            if (4 *(mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb + 3)
-                >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks
-                *M4MP4W_STSS_ALLOC_SIZE)
-            {
-                M4OSA_TRACE1_0("M4MP4W_processAU : video stss table is full");
-                return M4WAR_MP4W_OVERSIZE;
-            }
-
-#else
-
-            if (4 *mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb
-                >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks
-                *M4MP4W_STSS_ALLOC_SIZE)
-            {
-                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks += 1;
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STSS =
-                    (M4OSA_UInt32 *)M4MP4W_realloc(
-                    (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STSS, ( mMp4FileDataPtr->videoTrackPtr->
-                    nbOfAllocatedStssBlocks
-                    - 1) * M4MP4W_STSS_ALLOC_SIZE,
-                    mMp4FileDataPtr->videoTrackPtr->
-                    nbOfAllocatedStssBlocks
-                    * M4MP4W_STSS_ALLOC_SIZE);
-                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS
-                    != M4OSA_NULL, M4ERR_ALLOC);
-            }
-
-#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
-
-            mMp4FileDataPtr->videoTrackPtr->
-                TABLE_STSS[mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb] =
-                mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb;
-            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb += 1;
-            mMp4FileDataPtr->filesize += 4;
-        }
-
-        /* Warning: time conversion cast 64to32! */
-        mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS =
-            (M4MP4W_Time32)auPtr->CTS;
-    }
-    else
-        return M4ERR_BAD_STREAM_ID;
-
-    /* I moved some state modification to after we know the sample has been written correctly. */
-    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
-        && (streamID == VideoStreamID))
-    {
-        mMp4FileDataPtr->audioMsStopTime =
-            (M4MP4W_Time32)(auPtr->CTS * scale_video);
-    }
-
-    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
-        || (streamID == VideoStreamID))
-    {
-        /*update fileSize*/
-        mMp4FileDataPtr->filesize += auPtr->size;
-    }
-
-    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
-        && (streamID == VideoStreamID))
-    {
-        /*update filesize with estimated audio data that will be added later.    */
-        /*Warning: Assumption is made that:                                     */
-        /* - audio samples have constant size (e.g. no sid).                    */
-        /* - max audio sample size has been set, and is the actual sample size. */
-
-        ERR_CHECK(mMp4FileDataPtr->audioMsChunkDur != 0,
-            M4WAR_MP4W_NOT_EVALUABLE);
-        mMp4FileDataPtr->filesize -=
-            (M4OSA_UInt32)(( mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
-            * scale_video) * (0.05/*always 50 AMR samples per second*/
-            *(M4OSA_Double)mMp4FileDataPtr->audioTrackPtr->MaxAUSize
-            + 16/*additional data for a new chunk*/
-            / (M4OSA_Double)mMp4FileDataPtr->audioMsChunkDur));
-
-        mMp4FileDataPtr->filesize += (M4OSA_UInt32)(( auPtr->CTS * scale_video)
-            * (0.05/*always 50 AMR samples per second*/
-            *(M4OSA_Double)mMp4FileDataPtr->audioTrackPtr->MaxAUSize
-            + 16/*additional data for a new chunk*/
-            / (M4OSA_Double)mMp4FileDataPtr->audioMsChunkDur));
-    }
-
-    M4OSA_TRACE1_4("processAU : size 0x%x mode %d filesize %lu limit %lu",
-        auPtr->size, auPtr->attribute, mMp4FileDataPtr->filesize,
-        mMp4FileDataPtr->MaxFileSize);
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_closeWrite( M4OSA_Context context )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_ERR err2 = M4NO_ERROR, err3 = M4NO_ERROR;
-
-    /*Warning: test should be done here to ensure context->pContext is not M4OSA_NULL,
-     but C is not C++...*/
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-
-    M4OSA_UChar camcoder_maj, camcoder_min, camcoder_rev; /*camcoder version*/
-    M4OSA_Bool bAudio =
-        (( mMp4FileDataPtr->hasAudio)
-        && (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
-        != 0)); /*((mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL) &&
-                    (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb != 0));*/
-    M4OSA_Bool bVideo =
-        (( mMp4FileDataPtr->hasVideo)
-        && (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
-        != 0)); /*((mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL) &&
-                    (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb != 0));*/
-    M4OSA_Bool bH263 = M4OSA_FALSE;
-    M4OSA_Bool bH264 = M4OSA_FALSE;
-    M4OSA_Bool bMP4V = M4OSA_FALSE;
-    M4OSA_Bool bAAC = M4OSA_FALSE;
-    M4OSA_Bool bEVRC = M4OSA_FALSE;
-
-    /*intermediate variables*/
-    M4OSA_UInt32 A, B, N, AB4N;
-
-    /*Trak variables*/
-    M4OSA_UInt32 a_trakId = AudioStreamID; /*     (audio=1)*/
-    /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
-    M4OSA_UInt32 a_trakOffset = 32;
-    M4OSA_UInt32 a_sttsSize = 24;          /* A (audio=24)*/
-    M4OSA_UInt32 a_stszSize = 20;          /* B (audio=20)*/
-    M4OSA_UInt32 a_trakSize = 402;         /*     (audio=402)*/
-    M4OSA_UInt32 a_mdiaSize = 302;         /*     (audio=302)*/
-    M4OSA_UInt32 a_minfSize = 229;         /*     (audio=229)*/
-    M4OSA_UInt32 a_stblSize = 169;         /*     (audio=169)*/
-    M4OSA_UInt32 a_stsdSize = 69;          /*     (audio=69 )*/
-    M4OSA_UInt32 a_esdSize = 53;           /*     (audio=53 )*/
-    M4OSA_UInt32 a_dataSize = 0;           /* temp: At the end, = currentPos*/
-    M4MP4W_Time32 a_trakDuration = 0;      /* equals lastCTS*/
-    M4MP4W_Time32 a_msTrakDuration = 0;
-    M4OSA_UInt32 a_stscSize = 28;          /* 16+12*nbchunksaudio*/
-    M4OSA_UInt32 a_stcoSize = 20;          /* 16+4*nbchunksaudio*/
-
-    M4OSA_UInt32 v_trakId = VideoStreamID; /* (video=2)*/
-    /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
-    M4OSA_UInt32 v_trakOffset = 32;
-    M4OSA_UInt32 v_sttsSize = 0;      /* A (video=16+8J)*/
-    M4OSA_UInt32 v_stszSize = 0;      /* B (video=20+4K)*/
-    M4OSA_UInt32 v_trakSize = 0; /* (h263=A+B+4N+426), (mp4v=A+B+dsi+4N+448) */
-    M4OSA_UInt32 v_mdiaSize = 0; /* (h263=A+B+4N+326), (mp4v=A+B+dsi+4N+348) */
-    M4OSA_UInt32 v_minfSize = 0; /* (h263=A+B+4N+253), (mp4v=A+B+dsi+4N+275) */
-    M4OSA_UInt32 v_stblSize = 0; /* (h263=A+B+4N+189), (mp4v=A+B+dsi+4N+211) */
-    M4OSA_UInt32 v_stsdSize = 0;      /* (h263=117)        , (mp4v=139+dsi    )*/
-    M4OSA_UInt32 v_esdSize = 0;       /* (h263=101)        , (mp4v=153+dsi    )*/
-    M4OSA_UInt32 v_dataSize = 0;      /* temp: At the end, = currentPos*/
-    M4MP4W_Time32 v_trakDuration = 0; /* equals lastCTS*/
-    M4MP4W_Time32 v_msTrakDuration = 0;
-    M4OSA_UInt32 v_stscSize = 28;     /* 16+12*nbchunksvideo*/
-    M4OSA_UInt32 v_stcoSize = 20;     /* 16+4*nbchunksvideo*/
-
-    /*video variables*/
-    M4OSA_UInt32 v_stssSize = 0; /* 4*N+16     STSS*/
-
-    /*aac & mp4v temp variable*/
-    M4OSA_UInt8 dsi = 0;
-
-    /*H264 variables*/
-    M4OSA_UInt32 v_avcCSize = 0; /* dsi+15*/
-
-    /*MP4V variables*/
-    M4OSA_UInt32 v_esdsSize = 0;        /* dsi+37*/
-    M4OSA_UInt8 v_ESDescriptorSize =
-        0; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
-    M4OSA_UInt8 v_DCDescriptorSize = 0; /* dsi+15*/
-
-    /*AAC variables*/
-    M4OSA_UInt32 a_esdsSize = 0;        /* dsi+37*/
-    M4OSA_UInt8 a_ESDescriptorSize =
-        0; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
-    M4OSA_UInt8 a_DCDescriptorSize = 0; /* dsi+15*/
-
-    /*General variables*/
-
-    /* audio chunk size + video chunk size*/
-    M4OSA_UInt32 mdatSize = 8;
-    M4OSA_UInt32 moovSize = 116; /* 116 + 402(audio) +    (A+B+4N+426)(h263) or */
-    /*                        (A+B+dsi+4N+448)(mp4v)    */
-    M4OSA_UInt32 creationTime; /* C */
-
-    /*flag to set up the chunk interleave strategy*/
-    M4OSA_Bool bInterleaveAV =
-        (bAudio && bVideo && (mMp4FileDataPtr->InterleaveDur != 0));
-
-    M4OSA_Context fileWriterContext = mMp4FileDataPtr->fileWriterContext;
-
-    M4OSA_UInt32 i;
-
-    M4OSA_Double scale_audio = 0.0;
-    M4OSA_Double scale_video = 0.0;
-    M4MP4W_Time32 delta;
-
-#ifndef _M4MP4W_MOOV_FIRST
-
-    M4OSA_FilePosition moovPos, mdatPos;
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    /*macro state */
-    mMp4FileDataPtr->state = M4MP4W_closed;
-
-    /*if no data !*/
-    if ((!bAudio) && (!bVideo))
-    {
-        err = M4NO_ERROR; /*would be better to return a warning ?*/
-        goto cleanup;
-    }
-
-#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
-    /* Remove safety file to make room for what needs to be written out here
-    (chunk flushing and moov). */
-
-    if (M4OSA_TRUE == mMp4FileDataPtr->cleanSafetyFile)
-    {
-        M4OSA_Context tempContext;
-        err = mMp4FileDataPtr->fileWriterFunctions->openWrite(&tempContext,
-            mMp4FileDataPtr->safetyFileUrl,
-            M4OSA_kFileWrite | M4OSA_kFileCreate);
-
-        if (M4NO_ERROR != err)
-            goto cleanup;
-        err = mMp4FileDataPtr->fileWriterFunctions->closeWrite(tempContext);
-
-        if (M4NO_ERROR != err)
-            goto cleanup;
-        mMp4FileDataPtr->safetyFileUrl = M4OSA_NULL;
-        mMp4FileDataPtr->cleanSafetyFile = M4OSA_FALSE;
-    }
-
-#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-
-    if (bVideo)
-    {
-        if ((M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable)
-            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->chunkSizeTable)
-            || (M4OSA_NULL
-            == mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable)
-            || (M4OSA_NULL
-            == mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable)
-            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ)
-            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STTS)
-            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STSS))
-        {
-            mMp4FileDataPtr->fileWriterFunctions->closeWrite(
-                fileWriterContext); /**< close the stream anyway */
-            M4MP4W_freeContext(context); /**< Free the context content */
-            return M4ERR_ALLOC;
-        }
-
-        /*video microstate*/
-        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_closed;
-
-        /*current chunk is the last one and gives the total number of video chunks (-1)*/
-        for ( i = 0; i < mMp4FileDataPtr->videoTrackPtr->currentChunk; i++ )
-        {
-            v_dataSize += mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
-        }
-
-#ifndef _M4MP4W_MOOV_FIRST
-#ifndef _M4MP4W_UNBUFFERED_VIDEO
-        /*flush chunk*/
-
-        if (mMp4FileDataPtr->videoTrackPtr->currentPos > 0)
-        {
-            err = M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[0],
-                mMp4FileDataPtr->videoTrackPtr->currentPos,
-                mMp4FileDataPtr->fileWriterFunctions,
-                mMp4FileDataPtr->fileWriterContext);
-
-            if (M4NO_ERROR != err)
-                goto cleanup;
-        }
-
-#endif
-
-        M4OSA_TRACE1_0("flush video | CLOSE");
-        M4OSA_TRACE1_3("current chunk = %d  offset = 0x%x size = 0x%08X",
-            mMp4FileDataPtr->videoTrackPtr->currentChunk,
-            mMp4FileDataPtr->absoluteCurrentPos,
-            mMp4FileDataPtr->videoTrackPtr->currentPos);
-
-        /*update chunk offset*/
-        mMp4FileDataPtr->videoTrackPtr->
-            chunkOffsetTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-            mMp4FileDataPtr->absoluteCurrentPos;
-
-        /*add chunk size to absoluteCurrentPos*/
-        mMp4FileDataPtr->absoluteCurrentPos +=
-            mMp4FileDataPtr->videoTrackPtr->currentPos;
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-        /*update last chunk size, and add this value to v_dataSize*/
-
-        mMp4FileDataPtr->videoTrackPtr->
-            chunkSizeTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-            mMp4FileDataPtr->videoTrackPtr->currentPos;
-        v_dataSize +=
-            mMp4FileDataPtr->videoTrackPtr->currentPos; /*add last chunk size*/
-
-        v_trakDuration = mMp4FileDataPtr->videoTrackPtr->
-            CommonData.lastCTS; /* equals lastCTS*/
-
-        /* bugfix: if a new chunk was just created, cancel it before to close */
-        if ((mMp4FileDataPtr->videoTrackPtr->currentChunk != 0)
-            && (mMp4FileDataPtr->videoTrackPtr->currentPos == 0))
-        {
-            mMp4FileDataPtr->videoTrackPtr->currentChunk--;
-        }
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-
-        if ((mMp4FileDataPtr->videoTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
-            currentStsc] & 0xFFF) == 0)
-        {
-            mMp4FileDataPtr->videoTrackPtr->currentStsc--;
-        }
-
-#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
-
-        /* Last sample duration */
-        /* If we have the file duration we use it, else we duplicate the last AU */
-
-        if (mMp4FileDataPtr->MaxFileDuration > 0)
-        {
-            /* use max file duration to calculate delta of last AU */
-            delta = mMp4FileDataPtr->MaxFileDuration
-                - mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS;
-            v_trakDuration = mMp4FileDataPtr->MaxFileDuration;
-
-            if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb > 1)
-            {
-                /* if more than 1 frame, create a new stts entry (else already created) */
-                mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb++;
-            }
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-            M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->
-                TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                CommonData.sttsTableEntryNb - 1], 1);
-            M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->
-                TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                CommonData.sttsTableEntryNb - 1], delta);
-
-#else
-
-            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
-                *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
-                - 1)] = 1;
-            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
-                *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
-                - 1) + 1] = delta;
-
-#endif
-
-        }
-        else
-        {
-            /* duplicate the delta of the previous frame */
-            if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb > 1)
-            {
-                /* if more than 1 frame, duplicate the stts entry (else already exists) */
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-                v_trakDuration +=
-                    M4MP4W_get32_Hi(&mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb - 1]);
-                mMp4FileDataPtr->videoTrackPtr->
-                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb - 1] += 1;
-
-#else
-
-                v_trakDuration += mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
-                    * (mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb - 1) + 1];
-                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
-                    mMp4FileDataPtr->videoTrackPtr->
-                    CommonData.sttsTableEntryNb - 1)] += 1;
-
-#endif
-
-            }
-            else
-            {
-                M4OSA_TRACE1_0("M4MP4W_closeWrite : ! videoTrackPtr,\
-                     cannot know the duration of the unique AU !");
-                /* If there is an audio track, we use it as a file duration
-                (and so, as AU duration...) */
-                if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)
-                {
-                    M4OSA_TRACE1_0(
-                        "M4MP4W_closeWrite : ! Let's use the audio track duration !");
-                    mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] =
-                        (M4OSA_UInt32)(
-                        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
-                        * (1000.0 / mMp4FileDataPtr->audioTrackPtr->
-                        CommonData.timescale));
-                    v_trakDuration =
-                        mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1];
-                }
-                /* Else, we use a MAGICAL value (66 ms) */
-                else
-                {
-                    M4OSA_TRACE1_0(
-                        "M4MP4W_closeWrite : ! No audio track -> use magical value (66) !"); /*    */
-                    mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = 66;
-                    v_trakDuration = 66;
-                }
-            }
-        }
-
-        /* Calculate table sizes */
-        A = v_sttsSize = 16 + 8 * mMp4FileDataPtr->videoTrackPtr->
-            CommonData.sttsTableEntryNb; /* A (video=16+8J)*/
-        B = v_stszSize = 20 + 4 * mMp4FileDataPtr->videoTrackPtr->
-            CommonData.sampleNb; /* B (video=20+4K)*/
-        N = mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb;
-        AB4N = A + B + 4 * N;
-
-        scale_video =
-            1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
-        v_msTrakDuration = (M4OSA_UInt32)(v_trakDuration * scale_video);
-
-        /*Convert integers in the table from LE into BE*/
-#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb);
-        M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS,
-            2 * (mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb));
-
-#endif
-
-        M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS,
-            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb);
-
-        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kH263)
-        {
-            bH263 = M4OSA_TRUE;
-            v_trakSize = AB4N + 426; /* (h263=A+B+4N+426)*/
-            v_mdiaSize = AB4N + 326; /* (h263=A+B+4N+326)*/
-            v_minfSize = AB4N + 253; /* (h263=A+B+4N+253)*/
-            v_stblSize = AB4N + 189; /* (h263=A+B+4N+189)*/
-            v_stsdSize = 117;        /* (h263=117)*/
-            v_esdSize = 101;         /* (h263=101)*/
-
-            moovSize += AB4N + 426;
-
-            if (((M4OSA_Int32)mMp4FileDataPtr->videoTrackPtr->avgBitrate) != -1)
-            {
-                /*the optional 'bitr' atom is appended to the dsi,so filesize is 16 bytes bigger*/
-                v_trakSize += 16;
-                v_mdiaSize += 16;
-                v_minfSize += 16;
-                v_stblSize += 16;
-                v_stsdSize += 16;
-                v_esdSize += 16;
-                moovSize += 16;
-            }
-        }
-        else if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kH264)
-        {
-            bH264 = M4OSA_TRUE;
-            /* For H264 there is no default DSI, and its presence is mandatory,
-            so check the DSI has been set*/
-            if (0 == mMp4FileDataPtr->videoTrackPtr->dsiSize
-                || M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
-            {
-                M4OSA_TRACE1_0(
-                    "M4MP4W_closeWrite: error, no H264 DSI has been set!");
-                err = M4ERR_STATE;
-                goto cleanup;
-            }
-
-            /*H264 sizes of the atom*/
-
-            // Remove the hardcoded DSI values of H264Block2
-            // TODO: check bMULPPSSPS case
-            v_avcCSize = sizeof(M4OSA_UInt32) + sizeof(H264Block2) +
-                mMp4FileDataPtr->videoTrackPtr->dsiSize;
-
-            v_trakSize = AB4N + v_avcCSize + 411;
-            v_mdiaSize = AB4N + v_avcCSize + 311;
-            v_minfSize = AB4N + v_avcCSize + 238;
-            v_stblSize = AB4N + v_avcCSize + 174;
-            v_stsdSize =        v_avcCSize + 102;
-            v_esdSize  =        v_avcCSize + 86;
-
-            moovSize   += AB4N + v_avcCSize + 411;
-
-        }
-        else if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
-            == M4SYS_kMPEG_4)
-        {
-            bMP4V = M4OSA_TRUE;
-            /* For MPEG4 there is no default DSI, and its presence is mandatory,
-            so check the DSI has been set*/
-            if (0 == mMp4FileDataPtr->videoTrackPtr->dsiSize
-                || M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
-            {
-                M4OSA_TRACE1_0(
-                    "M4MP4W_closeWrite: error, no MPEG4 DSI has been set!");
-                err = M4ERR_STATE;
-                goto cleanup;
-            }
-
-            /*MP4V variables*/
-            dsi = mMp4FileDataPtr->videoTrackPtr->dsiSize;
-            v_esdsSize = 37 + dsi;         /* dsi+37*/
-            v_ESDescriptorSize =
-                23
-                + dsi; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
-            v_DCDescriptorSize = 15 + dsi; /* dsi+15*/
-
-            v_trakSize = AB4N + dsi + 448; /* (mp4v=A+B+dsi+4N+448)    */
-            v_mdiaSize = AB4N + dsi + 348; /* (mp4v=A+B+dsi+4N+348)    */
-            v_minfSize = AB4N + dsi + 275; /* (mp4v=A+B+dsi+4N+275)    */
-            v_stblSize = AB4N + dsi + 211; /* (mp4v=A+B+dsi+4N+211)    */
-            v_stsdSize = dsi + 139;        /* (mp4v=139+dsi)*/
-            v_esdSize = dsi + 123;         /* (mp4v=123+dsi)*/
-
-            moovSize += AB4N + dsi + 448;
-        }
-
-        /*video variables*/
-        v_stssSize = 16 + 4 * N; /* 4*N+16     STSS*/
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-        /* stsc update */
-
-        v_stscSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
-        v_stblSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
-        v_minfSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
-        v_mdiaSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
-        v_trakSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
-        moovSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
-
-        /* stco update */
-        v_stcoSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_stblSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_minfSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_mdiaSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_trakSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        moovSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-
-#else
-        /*stsc/stco update*/
-
-        v_stscSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_stcoSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_stblSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_minfSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_mdiaSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        v_trakSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-        moovSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
-
-#endif
-
-        /*update last chunk time*/
-
-        mMp4FileDataPtr->videoTrackPtr->
-            chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
-            v_msTrakDuration;
-    }
-
-    if (bAudio)
-    {
-        if ((M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable)
-            || (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->chunkSizeTable)
-            || (M4OSA_NULL
-            == mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable)
-            || (M4OSA_NULL
-            == mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable)
-            || (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->TABLE_STTS))
-        {
-            mMp4FileDataPtr->fileWriterFunctions->closeWrite(
-                fileWriterContext); /**< close the stream anyway */
-            M4MP4W_freeContext(context); /**< Free the context content */
-            return M4ERR_ALLOC;
-        }
-
-        /*audio microstate*/
-        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_closed;
-
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType == M4SYS_kAAC)
-        {
-            bAAC =
-                M4OSA_TRUE; /*else, audio is implicitely amr in the following*/
-            dsi = mMp4FileDataPtr->audioTrackPtr->dsiSize; /*variable size*/
-
-            a_esdsSize = 37 + dsi;                         /* dsi+37*/
-            a_ESDescriptorSize =
-                23
-                + dsi; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
-            a_DCDescriptorSize = 15 + dsi;                 /* dsi+15*/
-
-            a_esdSize = dsi + 73; /*overwrite a_esdSize with aac value*/
-            /*add dif. between amr & aac sizes: (- 53 + dsi + 37)*/
-            a_stsdSize += dsi + 20;
-            a_stblSize += dsi + 20;
-            a_minfSize += dsi + 20;
-            a_mdiaSize += dsi + 20;
-            a_trakSize += dsi + 20;
-            moovSize += dsi + 20;
-        }
-
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType
-            == M4SYS_kEVRC)
-        {
-            bEVRC =
-                M4OSA_TRUE; /*else, audio is implicitely amr in the following*/
-
-            /* evrc dsi is only 6 bytes while amr dsi is 9 bytes,all other blocks are unchanged */
-            a_esdSize -= 3;
-            a_stsdSize -= 3;
-            a_stblSize -= 3;
-            a_minfSize -= 3;
-            a_mdiaSize -= 3;
-            a_trakSize -= 3;
-            moovSize -= 3;
-        }
-
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize == 0)
-        {
-            if (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ)
-            {
-                mMp4FileDataPtr->fileWriterFunctions->closeWrite(
-                    fileWriterContext); /**< close the stream anyway */
-                M4MP4W_freeContext(context); /**< Free the context content */
-                return M4ERR_ALLOC;
-            }
-            /*Convert integers in the table from LE into BE*/
-            M4MP4W_table32ToBE(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ,
-                mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb);
-            a_stszSize +=
-                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-            a_stblSize +=
-                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-            a_minfSize +=
-                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-            a_mdiaSize +=
-                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-            a_trakSize +=
-                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-            moovSize += 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
-        }
-
-        moovSize += 402;
-
-        /*current chunk is the last one and gives the total number of audio chunks (-1)*/
-        for ( i = 0; i < mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
-        {
-            a_dataSize += mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
-        }
-
-#ifndef _M4MP4W_MOOV_FIRST
-        /*flush chunk*/
-
-        if (mMp4FileDataPtr->audioTrackPtr->currentPos > 0)
-        {
-            err = M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[0],
-                mMp4FileDataPtr->audioTrackPtr->currentPos,
-                mMp4FileDataPtr->fileWriterFunctions,
-                mMp4FileDataPtr->fileWriterContext);
-
-            if (M4NO_ERROR != err)
-                goto cleanup;
-        }
-
-        M4OSA_TRACE1_0("flush audio | CLOSE");
-        M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
-            mMp4FileDataPtr->audioTrackPtr->currentChunk,
-            mMp4FileDataPtr->absoluteCurrentPos);
-
-        /*update chunk offset*/
-        mMp4FileDataPtr->audioTrackPtr->
-            chunkOffsetTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-            mMp4FileDataPtr->absoluteCurrentPos;
-
-        /*add chunk size to absoluteCurrentPos*/
-        mMp4FileDataPtr->absoluteCurrentPos +=
-            mMp4FileDataPtr->audioTrackPtr->currentPos;
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-        /*update last chunk size, and add this value to a_dataSize*/
-
-        mMp4FileDataPtr->audioTrackPtr->
-            chunkSizeTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-            mMp4FileDataPtr->audioTrackPtr->currentPos;
-        a_dataSize +=
-            mMp4FileDataPtr->audioTrackPtr->currentPos; /*add last chunk size*/
-
-        /* bugfix: if a new chunk was just created, cancel it before to close */
-        if ((mMp4FileDataPtr->audioTrackPtr->currentChunk != 0)
-            && (mMp4FileDataPtr->audioTrackPtr->currentPos == 0))
-        {
-            mMp4FileDataPtr->audioTrackPtr->currentChunk--;
-        }
-#ifdef _M4MP4W_UNBUFFERED_VIDEO
-
-        if ((mMp4FileDataPtr->audioTrackPtr->
-            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->
-            currentStsc] & 0xFFF) == 0)
-        {
-            mMp4FileDataPtr->audioTrackPtr->currentStsc--;
-        }
-
-#endif                                                          /*_M4MP4W_UNBUFFERED_VIDEO*/
-
-        a_trakDuration = mMp4FileDataPtr->audioTrackPtr->
-            CommonData.lastCTS; /* equals lastCTS*/
-        /* add last sample dur */
-
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb != 1)
-        {
-#ifdef DUPLICATE_STTS_IN_LAST_AU
-            /*increase of 1 the number of consecutive AUs with same duration*/
-
-            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
-                *(mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
-                - 1) - 2] += 1;
-
-#endif /*DUPLICATE_STTS_IN_LAST_AU*/
-
-            a_trakDuration += mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
-                * (mMp4FileDataPtr->audioTrackPtr->
-                CommonData.sttsTableEntryNb - 1) - 1];
-        }
-        else if (0 == mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS)
-        {
-            if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType
-                == M4SYS_kAMR)
-            {
-                if (12200 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 32
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (10200 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 27
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (7950 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 21
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (7400 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 20
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (6700 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 18
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (5900 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 16
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (5150 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 14
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-                else if (4750 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
-                {
-                    a_trakDuration = a_dataSize / 13
-                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
-                }
-            }
-        }
-
-        scale_audio =
-            1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
-        a_msTrakDuration = (M4OSA_UInt32)(a_trakDuration * scale_audio);
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-        /* stsc update */
-
-        a_stscSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
-        a_stblSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
-        a_minfSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
-        a_mdiaSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
-        a_trakSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
-        moovSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
-
-        /* stso update */
-        a_stcoSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_stblSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_minfSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_mdiaSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_trakSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        moovSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-
-#else
-        /*stsc/stco update*/
-
-        a_stscSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_stcoSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_stblSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_minfSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_mdiaSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        a_trakSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-        moovSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
-
-#endif
-
-        /* compute the new size of stts*/
-
-        a_sttsSize = 16 + 8 * (mMp4FileDataPtr->audioTrackPtr->
-            CommonData.sttsTableEntryNb - 1);
-
-        moovSize += a_sttsSize - 24;
-        a_mdiaSize += a_sttsSize - 24;
-        a_minfSize += a_sttsSize - 24;
-        a_stblSize += a_sttsSize - 24;
-        a_trakSize += a_sttsSize - 24;
-
-        /*update last chunk time*/
-        mMp4FileDataPtr->audioTrackPtr->
-            chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
-            a_msTrakDuration;
-    }
-
-    /* changing the way the mdat size is computed.
-    The real purpose of the mdat size is to know the amount to skip to get to the next
-    atom, which is the moov; the size of media in the mdat is almost secondary. Therefore,
-    it is of utmost importance that the mdat size "points" to where the moov actually
-    begins. Now, the moov begins right after the last data we wrote, so how could the sum
-    of all chunk sizes be different from the total size of what has been written? Well, it
-    can happen when the writing was unexpectedly stopped (because of lack of disk space,
-    for instance), in this case a chunk may be partially written (the partial write is not
-    necessarily erased) but it may not be reflected in the chunk size list (which may
-    believe it hasn't been written or on the contrary that it has been fully written). In
-    the case of such a mismatch, there is either unused data in the mdat (not very good,
-    but tolerable) or when reading the last chunk it will read the beginning of the moov
-    as part of the chunk (which means the last chunk won't be correctly decoded), both of
-    which are still better than losing the whole recording. In the long run it'll probably
-    be attempted to always clean up back to a consistent state, but at any rate it is
-    always safer to have the mdat size be computed using the position where the moov
-    actually begins, rather than using the size it is thought the mdat has.
-
-    Therefore, I will record where we are just before writing the moov, to serve when
-    updating the mdat size. */
-
-    /* mdatSize += a_dataSize + v_dataSize; *//*TODO allow for multiple chunks*/
-
-    /* End of Pierre Lebeaupin 19/12/2007: changing the way the mdat size is computed. */
-
-    /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
-    a_trakOffset += moovSize;
-    v_trakOffset += moovSize/*+ a_dataSize*/;
-
-    if (bInterleaveAV == M4OSA_FALSE)
-        v_trakOffset += a_dataSize;
-
-    /*system time since 1970 */
-#ifndef _M4MP4W_DONT_USE_TIME_H
-
-    time((time_t *)&creationTime);
-    /*convert into time since 1/1/1904 00h00 (normative)*/
-    creationTime += 2082841761; /*nb of sec between 1904 and 1970*/
-
-#else                                            /*_M4MP4W_DONT_USE_TIME_H*/
-
-    creationTime =
-        0xBBD09100; /* = 7/11/2003 00h00 ; in hexa because of code scrambler limitation with
-                                           large integers */
-
-#endif                                           /*_M4MP4W_DONT_USE_TIME_H*/
-
-    mMp4FileDataPtr->duration =
-        max(a_msTrakDuration, v_msTrakDuration); /*max audio/video*/
-
-#ifdef _M4MP4W_MOOV_FIRST
-    /*open file in write binary mode*/
-
-    err = mMp4FileDataPtr->fileWriterFunctions->openWrite(&fileWriterContext,
-        mMp4FileDataPtr->url, 0x22);
-    ERR_CHECK(err == M4NO_ERROR, err);
-
-    /*ftyp atom*/
-    if (mMp4FileDataPtr->ftyp.major_brand != 0)
-    {
-        M4OSA_UInt32 i;
-
-        /* Put customized ftyp box */
-        CLEANUPonERR(M4MP4W_putBE32(16
-            + (mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4),
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(M4MPAC_FTYP_TAG,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->ftyp.major_brand,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->ftyp.minor_version,
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext));
-
-        for ( i = 0; i < mMp4FileDataPtr->ftyp.nbCompatibleBrands; i++ )
-        {
-            CLEANUPonERR(
-                M4MP4W_putBE32(mMp4FileDataPtr->ftyp.compatible_brands[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                mMp4FileDataPtr->fileWriterContext));
-        }
-    }
-    else
-    {
-        /* Put default ftyp box */
-        CLEANUPonERR(M4MP4W_putBlock(Default_ftyp, sizeof(Default_ftyp),
-            mMp4FileDataPtr->fileWriterFunctions,
-            mMp4FileDataPtr->fileWriterContext));
-    }
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-#ifndef _M4MP4W_MOOV_FIRST
-    /* seek is used to get the current position relative to the start of the file. */
-    /* ... or rather, seek used to be used for that, but it has been found this functionality
-    is not reliably, or sometimes not at all, implemented in the various OSALs, so we now avoid
-    using it. */
-    /* Notice this new method assumes we're at the end of the file, this will break if ever we
-    are overwriting a larger file. */
-
-    CLEANUPonERR(mMp4FileDataPtr->fileWriterFunctions->getOption(
-        mMp4FileDataPtr->fileWriterContext,
-        M4OSA_kFileWriteGetFileSize, (M4OSA_DataOption *) &moovPos));
-    /* moovPos will be used after writing the moov. */
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    CLEANUPonERR(M4MP4W_putBE32(moovSize, mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBlock(CommonBlock3, sizeof(CommonBlock3),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBE32(creationTime,
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBE32(creationTime,
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBlock(CommonBlock4, sizeof(CommonBlock4),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->duration,
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBlock(CommonBlock5, sizeof(CommonBlock5),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-    if (bAudio)
-    {
-        CLEANUPonERR(M4MP4W_putBE32(a_trakSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock6, sizeof(CommonBlock6),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(a_trakId,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7, sizeof(CommonBlock7),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(a_msTrakDuration,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7bis, sizeof(CommonBlock7bis),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(AMRBlock1, sizeof(AMRBlock1),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
-        CLEANUPonERR(M4MP4W_putBE32(a_mdiaSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock8, sizeof(CommonBlock8),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(
-            M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.timescale,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(a_trakDuration,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock9, sizeof(CommonBlock9),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(AMRBlock1_1, sizeof(AMRBlock1_1),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
-        CLEANUPonERR(M4MP4W_putBE32(a_minfSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock10, sizeof(CommonBlock10),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(a_stblSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock11, sizeof(CommonBlock11),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(a_sttsSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock12, sizeof(CommonBlock12),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        CLEANUPonERR(M4MP4W_putBE32(
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb - 1,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        /*invert the table data to bigendian*/
-        M4MP4W_table32ToBE(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS,
-            2 * (mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
-            - 1));
-        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
-            *)mMp4FileDataPtr->audioTrackPtr->TABLE_STTS,
-            ( mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb - 1)
-            * 8,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
-
-        /* stsd */
-        CLEANUPonERR(M4MP4W_putBE32(a_stsdSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionHeader,
-            sizeof(SampleDescriptionHeader),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(a_esdSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        /* sample desc entry inside stsd */
-        if (bAAC)
-        {
-            CLEANUPonERR(M4MP4W_putBlock(AACBlock1, sizeof(AACBlock1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-        }
-        else if (bEVRC)
-        {
-            CLEANUPonERR(M4MP4W_putBlock(EVRC8Block1, sizeof(EVRC8Block1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*evrc*/
-        }
-        else                         /*AMR8*/
-        {
-            CLEANUPonERR(M4MP4W_putBlock(AMR8Block1, sizeof(AMR8Block1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*amr8*/
-        }
-        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryStart,
-            sizeof(SampleDescriptionEntryStart),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(AudioSampleDescEntryBoilerplate,
-            sizeof(AudioSampleDescEntryBoilerplate),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
-        CLEANUPonERR(
-            M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.timescale
-            << 16,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        /* DSI inside sample desc entry */
-        if (bAAC)
-        {
-            CLEANUPonERR(M4MP4W_putBE32(a_esdsSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock0,
-                sizeof(MPEGConfigBlock0), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putByte(a_ESDescriptorSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock1,
-                sizeof(MPEGConfigBlock1), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putByte(a_DCDescriptorSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putBlock(AACBlock2, sizeof(AACBlock2),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(
-                M4MP4W_putBE24(mMp4FileDataPtr->audioTrackPtr->avgBitrate * 5,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(
-                M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->maxBitrate,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(
-                M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->avgBitrate,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock2,
-                sizeof(MPEGConfigBlock2), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putByte(mMp4FileDataPtr->audioTrackPtr->dsiSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->DSI,
-                mMp4FileDataPtr->audioTrackPtr->dsiSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock3,
-                sizeof(MPEGConfigBlock3), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*aac*/
-        }
-        else if (bEVRC)
-        {
-            M4OSA_UInt8 localDsi[6];
-            M4OSA_UInt32 localI;
-
-            CLEANUPonERR(M4MP4W_putBlock(EVRCBlock3_1, sizeof(EVRCBlock3_1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*audio*/
-
-            /* copy the default block in a local variable*/
-            for ( localI = 0; localI < 6; localI++ )
-            {
-                localDsi[localI] = EVRCBlock3_2[localI];
-            }
-            /* computes the number of sample per au */
-            /* and stores it in the DSI*/
-            /* assumes a char is enough to store the data*/
-            localDsi[5] =
-                (M4OSA_UInt8)(mMp4FileDataPtr->audioTrackPtr->sampleDuration
-                / 160)/*EVRC 1 frame duration*/;
-
-            if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
-            {
-                /* copy vendor name */
-                for ( localI = 0; localI < 4; localI++ )
-                {
-                    localDsi[localI] = (M4OSA_UInt8)(
-                        mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
-                }
-            }
-            CLEANUPonERR(M4MP4W_putBlock(localDsi, 6,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*audio*/
-        }
-        else                         /*AMR8*/
-        {
-            M4OSA_UInt8 localDsi[9];
-            M4OSA_UInt32 localI;
-
-            CLEANUPonERR(M4MP4W_putBlock(AMRDSIHeader, sizeof(AMRDSIHeader),
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-            /* copy the default block in a local variable*/
-            for ( localI = 0; localI < 9; localI++ )
-            {
-                localDsi[localI] = AMRDefaultDSI[localI];
-            }
-            /* computes the number of sample per au */
-            /* and stores it in the DSI*/
-            /* assumes a char is enough to store the data*/
-            /* ALERT! The potential of the following line of code to explode in our face
-            is enormous when anything (sample rate or whatever) will change. This
-            calculation would be MUCH better handled by the VES or whatever deals with
-            the encoder more directly. */
-            localDsi[8] =
-                (M4OSA_UInt8)(mMp4FileDataPtr->audioTrackPtr->sampleDuration
-                / 160)/*AMR NB 1 frame duration*/;
-
-            if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
-            {
-                /* copy vendor name */
-                for ( localI = 0; localI < 4; localI++ )
-                {
-                    localDsi[localI] = (M4OSA_UInt8)(
-                        mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
-                }
-
-                /* copy the Mode Set */
-                for ( localI = 5; localI < 7; localI++ )
-                {
-                    localDsi[localI] = (M4OSA_UInt8)(
-                        mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
-                }
-            }
-            CLEANUPonERR(M4MP4W_putBlock(localDsi, 9,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*audio*/
-        }
-
-        /*end trak*/
-        CLEANUPonERR(M4MP4W_putBE32(a_stszSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock15, sizeof(CommonBlock15),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(
-            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(
-            M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        /*0 value for samplesize means not constant AU size*/
-        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize == 0)
-        {
-            CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
-                *)mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ,
-                mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb * 4,
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        }
-
-        CLEANUPonERR(M4MP4W_putBE32(a_stscSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock16, sizeof(CommonBlock16),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentStsc
-            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentStsc; i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(
-                ( mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[i]
-            >> 12) + 1, mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32((mMp4FileDataPtr->audioTrackPtr->
-                chunkSampleNbTable[i] & 0xFFF),
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-        }
-
-#else
-
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentChunk
-            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(i + 1,
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32(
-                mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-        }
-
-#endif
-
-        CLEANUPonERR(M4MP4W_putBE32(a_stcoSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock17, sizeof(CommonBlock17),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentChunk
-            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(a_trakOffset,
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-            a_trakOffset += mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
-
-            if (( bInterleaveAV == M4OSA_TRUE)
-                && (mMp4FileDataPtr->videoTrackPtr->currentChunk >= i))
-            {
-                a_trakOffset +=
-                    mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
-            }
-        }
-
-#else
-
-        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(
-                mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-        }
-
-#endif                                                                 /*_M4MP4W_MOOV_FIRST*/
-
-        CLEANUPonERR(M4MP4W_putBlock(AMRBlock4, sizeof(AMRBlock4),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
-    }
-
-    if (bVideo)
-    {
-        /*trak*/
-        CLEANUPonERR(M4MP4W_putBE32(v_trakSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock6, sizeof(CommonBlock6),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(v_trakId,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7, sizeof(CommonBlock7),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(v_msTrakDuration,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7bis, sizeof(CommonBlock7bis),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        /* In the track header width and height are 16.16 fixed point values,
-        so shift left the regular integer value by 16. */
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->width << 16,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->height
-            << 16,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-
-        CLEANUPonERR(M4MP4W_putBE32(v_mdiaSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock8, sizeof(CommonBlock8),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(creationTime,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(
-            M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->CommonData.timescale,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(v_trakDuration,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock9, sizeof(CommonBlock9),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(VideoBlock1_1, sizeof(VideoBlock1_1),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBE32(v_minfSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock10, sizeof(CommonBlock10),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(v_stblSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock11, sizeof(CommonBlock11),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(v_sttsSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock12, sizeof(CommonBlock12),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        for ( i = 0;
-            i < mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb;
-            i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(M4MP4W_get32_Lo(
-                &mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[i]),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*video*/
-            CLEANUPonERR(M4MP4W_putBE32(M4MP4W_get32_Hi(
-                &mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[i]),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*video*/
-        }
-
-#else
-
-        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
-            *)mMp4FileDataPtr->videoTrackPtr->TABLE_STTS,
-            ( mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb) * 8,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-
-#endif
-
-        /* stsd */
-
-        CLEANUPonERR(M4MP4W_putBE32(v_stsdSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionHeader,
-            sizeof(SampleDescriptionHeader),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(v_esdSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        /* sample desc entry inside stsd */
-        if (bMP4V)
-        {
-            CLEANUPonERR(M4MP4W_putBlock(Mp4vBlock1, sizeof(Mp4vBlock1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-        }
-
-        if (bH263)
-        {
-            CLEANUPonERR(M4MP4W_putBlock(H263Block1, sizeof(H263Block1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*h263*/
-        }
-
-        if (bH264)
-        {
-            CLEANUPonERR(M4MP4W_putBlock(H264Block1, sizeof(H264Block1),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*h264*/
-        }
-        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryStart,
-            sizeof(SampleDescriptionEntryStart),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryVideoBoilerplate1,
-            sizeof(SampleDescriptionEntryVideoBoilerplate1),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBE16(mMp4FileDataPtr->videoTrackPtr->width,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBE16(mMp4FileDataPtr->videoTrackPtr->height,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBlock(VideoResolutions, sizeof(VideoResolutions),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*mp4v*/
-        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryVideoBoilerplate2,
-            sizeof(SampleDescriptionEntryVideoBoilerplate2),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-
-        /* DSI inside sample desc entry */
-        if (bH263)
-        {
-            /* The h263 dsi given through the api must be 7 bytes, that is, it shall not include
-             the optional bitrate box. However, if the bitrate information is set in the stream
-             handler, a bitrate box is appended here to the dsi */
-            if (((M4OSA_Int32)mMp4FileDataPtr->videoTrackPtr->avgBitrate) != -1)
-            {
-                CLEANUPonERR(M4MP4W_putBlock(H263Block2_bitr,
-                    sizeof(H263Block2_bitr),
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /* d263 box with bitr atom */
-
-                if (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
-                {
-                    CLEANUPonERR(M4MP4W_putBlock(H263Block3, sizeof(H263Block3),
-                        mMp4FileDataPtr->fileWriterFunctions,
-                        fileWriterContext)); /*h263*/
-                }
-                else
-                {
-                    CLEANUPonERR(
-                        M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
-                        mMp4FileDataPtr->videoTrackPtr->dsiSize,
-                        mMp4FileDataPtr->fileWriterFunctions,
-                        fileWriterContext));
-                }
-
-                CLEANUPonERR(M4MP4W_putBlock(H263Block4, sizeof(H263Block4),
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*h263*/
-                /* Pierre Lebeaupin 2008/04/29: the two following lines used to be swapped;
-                I changed to this order in order to conform to 3GPP. */
-                CLEANUPonERR(
-                    M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->avgBitrate,
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*h263*/
-                CLEANUPonERR(
-                    M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->maxBitrate,
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*h263*/
-            }
-            else
-            {
-                CLEANUPonERR(M4MP4W_putBlock(H263Block2, sizeof(H263Block2),
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /* d263 box */
-
-                if (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
-                {
-                    CLEANUPonERR(M4MP4W_putBlock(H263Block3, sizeof(H263Block3),
-                        mMp4FileDataPtr->fileWriterFunctions,
-                        fileWriterContext)); /*h263*/
-                }
-                else
-                {
-                    CLEANUPonERR(
-                        M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
-                        mMp4FileDataPtr->videoTrackPtr->dsiSize,
-                        mMp4FileDataPtr->fileWriterFunctions,
-                        fileWriterContext));
-                }
-            }
-        }
-
-        if (bMP4V)
-        {
-            M4OSA_UInt32 bufferSizeDB = 5 * mMp4FileDataPtr->videoTrackPtr->
-                avgBitrate; /*bufferSizeDB set to 5 times the bitrate*/
-
-            CLEANUPonERR(M4MP4W_putBE32(v_esdsSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock0,
-                sizeof(MPEGConfigBlock0), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putByte(v_ESDescriptorSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock1,
-                sizeof(MPEGConfigBlock1), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putByte(v_DCDescriptorSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBlock(Mp4vBlock3, sizeof(Mp4vBlock3),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBE24(bufferSizeDB,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(
-                M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->maxBitrate,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(
-                M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->avgBitrate,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock2,
-                sizeof(MPEGConfigBlock2), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putByte(mMp4FileDataPtr->videoTrackPtr->dsiSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
-                mMp4FileDataPtr->videoTrackPtr->dsiSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock3,
-                sizeof(MPEGConfigBlock3), mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*mp4v*/
-        }
-
-        if (bH264)
-        {
-            M4OSA_UInt16 ppsLentgh = 0; /* PPS length */
-            M4OSA_UInt16 spsLentgh = 0; /* SPS length */
-            M4OSA_UChar *tmpDSI = mMp4FileDataPtr->videoTrackPtr->DSI; /* DSI */
-            M4OSA_UInt16 NumberOfPPS;
-            M4OSA_UInt16 lCntPPS;
-
-            /* Put the avcC (header + DSI) size */
-            CLEANUPonERR(M4MP4W_putBE32(v_avcCSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*h264*/
-            /* Put the avcC header */
-            CLEANUPonERR(M4MP4W_putBlock(H264Block2, sizeof(H264Block2),
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*h264*/
-            /* Put the DSI (SPS + PPS) int the 3gp format*/
-            /* SPS length in BE */
-
-            if ((0x01 != mMp4FileDataPtr->videoTrackPtr->DSI[0]) ||
-                 (0x42 != mMp4FileDataPtr->videoTrackPtr->DSI[1]))
-            {
-                M4OSA_TRACE1_2("!!! M4MP4W_closeWrite ERROR : invalid AVCC 0x%X 0x%X",
-                    mMp4FileDataPtr->videoTrackPtr->DSI[0],
-                    mMp4FileDataPtr->videoTrackPtr->DSI[1]);
-                return M4ERR_PARAMETER;
-            }
-            // Do not strip the DSI
-            CLEANUPonERR( M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
-                mMp4FileDataPtr->videoTrackPtr->dsiSize,
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext) );/*h264*/
-
-        }
-
-        /*end trak*/
-        CLEANUPonERR(M4MP4W_putBE32(v_stszSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock15, sizeof(CommonBlock15),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(
-            M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        for ( i = 0; i < mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb;
-            i++ )
-        {
-            CLEANUPonERR(
-                M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext)); /*video*/
-        }
-
-#else
-
-        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
-            *)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
-            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb * 4,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-
-#endif
-
-        CLEANUPonERR(M4MP4W_putBE32(v_stscSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock16, sizeof(CommonBlock16),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
-
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentStsc
-            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentStsc; i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(
-                ( mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[i]
-            >> 12) + 1, mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32((mMp4FileDataPtr->videoTrackPtr->
-                chunkSampleNbTable[i] & 0xFFF),
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-        }
-
-#else
-
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentChunk
-            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-        for (i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)
-        {
-            CLEANUPonERR(M4MP4W_putBE32(i + 1,
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32(
-                mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-        }
-
-#endif
-
-        CLEANUPonERR(M4MP4W_putBE32(v_stcoSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBlock(CommonBlock17, sizeof(CommonBlock17),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentChunk
-            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-#ifdef _M4MP4W_MOOV_FIRST
-
-        for (i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)
-        {
-            if (( bInterleaveAV == M4OSA_TRUE)
-                && (mMp4FileDataPtr->audioTrackPtr->currentChunk >= i))
-            {
-                v_trakOffset +=
-                    mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
-            }
-            CLEANUPonERR(M4MP4W_putBE32(v_trakOffset,
-                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-            v_trakOffset += mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
-        }
-
-#else
-
-        for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++ )
-        {
-            CLEANUPonERR(M4MP4W_putBE32(
-                mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable[i],
-                mMp4FileDataPtr->fileWriterFunctions,
-                fileWriterContext));
-        }
-
-#endif                                                                 /*_M4MP4W_MOOV_FIRST*/
-
-        CLEANUPonERR(M4MP4W_putBE32(v_stssSize,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBlock(VideoBlock4, sizeof(VideoBlock4),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(
-            M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb,
-            mMp4FileDataPtr->fileWriterFunctions,
-            fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
-            *)mMp4FileDataPtr->videoTrackPtr->TABLE_STSS,
-            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb * 4,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-        CLEANUPonERR(M4MP4W_putBlock(VideoBlock5, sizeof(VideoBlock5),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
-    }
-#ifdef _M4MP4W_MOOV_FIRST
-    /*mdat*/
-
-    CLEANUPonERR(M4MP4W_putBE32(mdatSize, mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-    CLEANUPonERR(M4MP4W_putBlock(CommonBlock2, sizeof(CommonBlock2),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-    /*write data, according to the interleave mode (default is not interleaved)*/
-    if (bInterleaveAV == M4OSA_FALSE)
-    {
-        if (bAudio)
-        {
-            for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk;
-                i++ )
-            {
-                CLEANUPonERR(
-                    M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[i],
-                    mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i],
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*audio (previously a_dataSize)*/
-            }
-        }
-
-        if (bVideo)
-        {
-            for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk;
-                i++ )
-            {
-                CLEANUPonERR(
-                    M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[i],
-                    mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i],
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*video (previously a_dataSize)*/
-            }
-        }
-    }
-    else /*in this mode, we have audio and video to interleave*/
-    {
-        for ( i = 0; i <= max(mMp4FileDataPtr->audioTrackPtr->currentChunk,
-            mMp4FileDataPtr->videoTrackPtr->currentChunk); i++ )
-        {
-            if (i <= mMp4FileDataPtr->audioTrackPtr->currentChunk)
-            {
-                CLEANUPonERR(
-                    M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[i],
-                    mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i],
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*audio (previously a_dataSize)*/
-            }
-
-            if (i <= mMp4FileDataPtr->videoTrackPtr->currentChunk)
-            {
-                CLEANUPonERR(
-                    M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[i],
-                    mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i],
-                    mMp4FileDataPtr->fileWriterFunctions,
-                    fileWriterContext)); /*video (previously a_dataSize)*/
-            }
-        }
-    }
-
-#endif /*_M4MP4W_MOOV_FIRST*/
-
-    /*skip*/
-
-    CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipHeader,
-        sizeof(BlockSignatureSkipHeader), mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-
-    /* Write embedded string */
-    if (mMp4FileDataPtr->embeddedString == M4OSA_NULL)
-    {
-        CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipDefaultEmbeddedString,
-            sizeof(BlockSignatureSkipDefaultEmbeddedString),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    }
-    else
-    {
-        CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->embeddedString, 16,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    }
-
-    /* Write ves core version */
-    camcoder_maj = (M4OSA_UChar)(mMp4FileDataPtr->camcoderVersion / 100);
-    camcoder_min =
-        (M4OSA_UChar)(( mMp4FileDataPtr->camcoderVersion - 100 * camcoder_maj)
-        / 10);
-    camcoder_rev =
-        (M4OSA_UChar)(mMp4FileDataPtr->camcoderVersion - 100 * camcoder_maj - 10
-        * camcoder_min);
-
-    CLEANUPonERR(M4MP4W_putByte(' ', mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-    CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_maj + '0'),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putByte('.', mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-    CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_min + '0'),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    CLEANUPonERR(M4MP4W_putByte('.', mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-    CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_rev + '0'),
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-    /* Write integration tag */
-    CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar *)" -- ", 4,
-        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-
-    if (mMp4FileDataPtr->integrationTag == M4OSA_NULL)
-    {
-        CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipDefaultIntegrationTag,
-            sizeof(BlockSignatureSkipDefaultIntegrationTag),
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    }
-    else
-    {
-        CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->integrationTag, 60,
-            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
-    }
-
-#ifndef _M4MP4W_MOOV_FIRST
-    /*overwrite mdat size*/
-
-    if (mMp4FileDataPtr->ftyp.major_brand != 0)
-        mdatPos= 16 + mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4;
-    else
-        mdatPos = 24;
-
-    moovPos = moovPos - mdatPos;
-    mdatSize = moovPos;
-
-    CLEANUPonERR(mMp4FileDataPtr->fileWriterFunctions->seek(fileWriterContext,
-        M4OSA_kFileSeekBeginning, &mdatPos)); /*seek after ftyp...*/
-    CLEANUPonERR(M4MP4W_putBE32(mdatSize, mMp4FileDataPtr->fileWriterFunctions,
-        fileWriterContext));
-
-#endif                                        /*_M4MP4W_MOOV_FIRST*/
-
-cleanup:
-
-    /**
-    * Close the file even if an error occured */
-    if (M4OSA_NULL != mMp4FileDataPtr->fileWriterContext)
-    {
-        err2 =
-            mMp4FileDataPtr->fileWriterFunctions->closeWrite(mMp4FileDataPtr->
-            fileWriterContext); /**< close the stream anyway */
-
-        if (M4NO_ERROR != err2)
-        {
-            M4OSA_TRACE1_1(
-                "M4MP4W_closeWrite: fileWriterFunctions->closeWrite returns 0x%x",
-                err2);
-        }
-        mMp4FileDataPtr->fileWriterContext = M4OSA_NULL;
-    }
-
-#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
-    /* Remove safety file if still present (here it is cleanup in case of error and NOT the normal
-    removal of the safety file to free emergency space for the moov). */
-
-    if (M4OSA_TRUE == mMp4FileDataPtr->cleanSafetyFile)
-    {
-        M4OSA_Context tempContext;
-        err3 = mMp4FileDataPtr->fileWriterFunctions->openWrite(&tempContext,
-            mMp4FileDataPtr->safetyFileUrl,
-            M4OSA_kFileWrite | M4OSA_kFileCreate);
-
-        if (M4NO_ERROR != err2)
-            err2 = err3;
-
-        if (M4NO_ERROR
-            != err3) /* No sense closing if we couldn't open in the first place. */
-        {
-            err3 =
-                mMp4FileDataPtr->fileWriterFunctions->closeWrite(tempContext);
-
-            if (M4NO_ERROR != err2)
-                err2 = err3;
-        }
-        mMp4FileDataPtr->safetyFileUrl = M4OSA_NULL;
-        mMp4FileDataPtr->cleanSafetyFile = M4OSA_FALSE;
-    }
-
-#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
-
-    /* Delete embedded string */
-
-    if (M4OSA_NULL != mMp4FileDataPtr->embeddedString)
-    {
-        free(mMp4FileDataPtr->embeddedString);
-        mMp4FileDataPtr->embeddedString = M4OSA_NULL;
-    }
-
-    /* Delete integration tag */
-    if (M4OSA_NULL != mMp4FileDataPtr->integrationTag)
-    {
-        free(mMp4FileDataPtr->integrationTag);
-        mMp4FileDataPtr->integrationTag = M4OSA_NULL;
-    }
-
-    /**
-    * M4MP4W_freeContext() is now a private method, called only from here*/
-    err3 = M4MP4W_freeContext(context);
-
-    if (M4NO_ERROR != err3)
-    {
-        M4OSA_TRACE1_1("M4MP4W_closeWrite: M4MP4W_freeContext returns 0x%x",
-            err3);
-    }
-
-    /**
-    * Choose which error code to return */
-    if (M4NO_ERROR != err)
-    {
-        /**
-        * We give priority to main error */
-        M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err=0x%x", err);
-        return err;
-    }
-    else if (M4NO_ERROR != err2)
-    {
-        /**
-        * Error from closeWrite is returned if there is no main error */
-        M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err2=0x%x", err2);
-        return err2;
-    }
-    else
-    {
-        /**
-        * Error from M4MP4W_freeContext is returned only if there is no main error and
-          no close error */
-        M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err3=0x%x", err3);
-        return err3;
-    }
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_getOption( M4OSA_Context context, M4OSA_OptionID option,
-                           M4OSA_DataOption *valuePtr )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4SYS_StreamIDValue *streamIDvaluePtr = M4OSA_NULL;
-    M4MP4W_StreamIDsize *streamIDsizePtr = M4OSA_NULL;
-    M4MP4W_memAddr *memAddrPtr = M4OSA_NULL;
-    /*    M4MP4W_WriteCallBack*    callBackPtr = M4OSA_NULL;*/
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
-        || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
-
-    switch( option )
-    {
-        case (M4MP4W_maxAUperChunk):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_maxChunkSize):
-
-            streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
-
-            switch( streamIDvaluePtr->streamID )
-            {
-                case (AudioStreamID):
-                    if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
-                        return M4ERR_BAD_STREAM_ID;
-                    else
-                        streamIDvaluePtr->value =
-                        mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
-                    break;
-
-                case (VideoStreamID):
-                    if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
-                        return M4ERR_BAD_STREAM_ID;
-                    else
-                        streamIDvaluePtr->value =
-                        mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
-                    break;
-
-                case (0): /*all streams*/
-                    streamIDvaluePtr->value = mMp4FileDataPtr->MaxChunkSize;
-                    break;
-
-                default:
-                    return M4ERR_BAD_STREAM_ID;
-        }
-
-        break;
-
-    case (M4MP4W_maxChunkInter):
-
-        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
-
-        switch( streamIDvaluePtr->streamID )
-        {
-            case (0): /*all streams*/
-                streamIDvaluePtr->value = (M4OSA_UInt32)mMp4FileDataPtr->
-                    InterleaveDur; /*time conversion !*/
-                break;
-
-            default:
-                return M4ERR_BAD_STREAM_ID;
-        }
-        break;
-
-    case (M4MP4W_embeddedString):
-        memAddrPtr = (M4MP4W_memAddr *)(*valuePtr);
-        /*memAddrPtr must have been already allocated by the caller
-        and memAddrPtr->size initialized with the max possible length in bytes*/
-        ERR_CHECK(memAddrPtr->size >= 16, M4ERR_PARAMETER);
-        ERR_CHECK(memAddrPtr->addr != M4OSA_NULL, M4ERR_PARAMETER);
-        /*memAddrPtr->size is updated with the actual size of the string*/
-        memAddrPtr->size = 16;
-        /*if no value was set, return the default string */
-        if (mMp4FileDataPtr->embeddedString != M4OSA_NULL)
-            memcpy((void *)memAddrPtr->addr,
-            (void *)mMp4FileDataPtr->embeddedString, 16);
-        else
-            memcpy((void *)memAddrPtr->addr,
-            (void *)BlockSignatureSkipDefaultEmbeddedString,
-            16);
-        break;
-
-    case (M4MP4W_integrationTag):
-        memAddrPtr = (M4MP4W_memAddr *)(*valuePtr);
-        /*memAddrPtr must have been already allocated by the caller
-        and memAddrPtr->size initialized with the max possible length in bytes*/
-        ERR_CHECK(memAddrPtr->size >= 60, M4ERR_PARAMETER);
-        ERR_CHECK(memAddrPtr->addr != M4OSA_NULL, M4ERR_PARAMETER);
-        /*memAddrPtr->size is updated with the actual size of the string*/
-        memAddrPtr->size = 60;
-        /*if no value was set, return the default string 0 */
-        if (mMp4FileDataPtr->integrationTag != M4OSA_NULL)
-            memcpy((void *)memAddrPtr->addr,
-            (void *)mMp4FileDataPtr->integrationTag, 60);
-        else
-            memcpy((void *)memAddrPtr->addr,
-            (void *)BlockSignatureSkipDefaultIntegrationTag,
-            60);
-        break;
-
-    case (M4MP4W_CamcoderVersion):
-
-        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
-
-        switch( streamIDvaluePtr->streamID )
-        {
-            case (0): /*all streams*/
-                streamIDvaluePtr->value = mMp4FileDataPtr->camcoderVersion;
-                break;
-
-            default:
-                return M4ERR_BAD_STREAM_ID;
-        }
-        break;
-
-    case (M4MP4W_preWriteCallBack):
-        return M4ERR_NOT_IMPLEMENTED;
-        /*callBackPtr = (M4MP4W_WriteCallBack*)(*valuePtr);
-        *callBackPtr = mMp4FileDataPtr->PreWriteCallBack;
-        break;*/
-
-    case (M4MP4W_postWriteCallBack):
-        return M4ERR_NOT_IMPLEMENTED;
-        /*callBackPtr = (M4MP4W_WriteCallBack*)(*valuePtr);
-        *callBackPtr = mMp4FileDataPtr->PostWriteCallBack;
-        break;*/
-
-    case (M4MP4W_maxAUsize):
-
-        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
-
-        switch( streamIDvaluePtr->streamID )
-        {
-            case (AudioStreamID):
-                if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
-                    return M4ERR_BAD_STREAM_ID;
-                else
-                    streamIDvaluePtr->value =
-                    mMp4FileDataPtr->audioTrackPtr->MaxAUSize;
-                break;
-
-            case (VideoStreamID):
-                if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
-                    return M4ERR_BAD_STREAM_ID;
-                else
-                    streamIDvaluePtr->value =
-                    mMp4FileDataPtr->videoTrackPtr->MaxAUSize;
-                break;
-
-            case (0): /*all streams*/
-                streamIDvaluePtr->value = mMp4FileDataPtr->MaxAUSize;
-                break;
-
-            default:
-                return M4ERR_BAD_STREAM_ID;
-        }
-
-        break;
-
-    case (M4MP4W_IOD):
-        return M4ERR_NOT_IMPLEMENTED;
-
-    case (M4MP4W_ESD):
-        return M4ERR_NOT_IMPLEMENTED;
-
-    case (M4MP4W_SDP):
-        return M4ERR_NOT_IMPLEMENTED;
-
-    case (M4MP4W_trackSize):
-        streamIDsizePtr = (M4MP4W_StreamIDsize *)(*valuePtr);
-        streamIDsizePtr->width = mMp4FileDataPtr->videoTrackPtr->width;
-        streamIDsizePtr->height = mMp4FileDataPtr->videoTrackPtr->height;
-        break;
-
-    case (M4MP4W_estimateAudioSize):
-        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
-        streamIDvaluePtr->value =
-            (M4OSA_UInt32)mMp4FileDataPtr->estimateAudioSize;
-        break;
-
-    case (M4MP4W_MOOVfirst):
-        return M4ERR_NOT_IMPLEMENTED;
-
-    case (M4MP4W_V2_MOOF):
-        return M4ERR_NOT_IMPLEMENTED;
-
-    case (M4MP4W_V2_tblCompres):
-        return M4ERR_NOT_IMPLEMENTED;
-
-    default:
-        return M4ERR_BAD_OPTION_ID;
-    }
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_setOption( M4OSA_Context context, M4OSA_OptionID option,
-                           M4OSA_DataOption value )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4SYS_StreamIDValue *streamIDvaluePtr = M4OSA_NULL;
-    M4MP4W_StreamIDsize *streamIDsizePtr = M4OSA_NULL;
-    M4MP4W_memAddr *memAddrPtr = M4OSA_NULL;
-    M4SYS_StreamIDmemAddr *streamIDmemAddrPtr;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    /* Verify state */
-    switch( option )
-    {
-        case M4MP4W_maxFileDuration:
-        case M4MP4W_DSI:
-            /* this param can be set at the end of a recording */
-            ERR_CHECK((mMp4FileDataPtr->state != M4MP4W_closed), M4ERR_STATE);
-            break;
-
-        case M4MP4W_setFtypBox:
-            /* this param can only be set before starting any write */
-            ERR_CHECK(mMp4FileDataPtr->state == M4MP4W_opened, M4ERR_STATE);
-            break;
-
-        default:
-            /* in general params can be set at open or ready stage */
-            ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
-                || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
-    }
-
-    /* Set option */
-    switch( option )
-    {
-        case (M4MP4W_maxAUperChunk):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_maxChunkSize):
-
-            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
-
-            switch( streamIDvaluePtr->streamID )
-            {
-                case (AudioStreamID):
-                    if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
-                        return
-                        M4ERR_BAD_STREAM_ID; /*maybe the stream has not been added yet*/
-                    else
-                    {
-                        mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
-                            streamIDvaluePtr->value;
-                    }
-
-                    break;
-
-                case (VideoStreamID):
-                    if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
-                        return
-                        M4ERR_BAD_STREAM_ID; /*maybe the stream has not been added yet*/
-                    else
-                    {
-                        mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
-                            streamIDvaluePtr->value;
-                    }
-                    break;
-
-                case (0): /*all streams*/
-
-                    /*In M4MP4W_opened state, no stream is present yet, so only global value
-                    needs to be updated.*/
-                    mMp4FileDataPtr->MaxChunkSize = streamIDvaluePtr->value;
-
-                    if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
-                    {
-                        mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
-                            streamIDvaluePtr->value;
-                    }
-
-                    if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
-                    {
-                        mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
-                            streamIDvaluePtr->value;
-                    }
-                    break;
-
-                default:
-                    return M4ERR_BAD_STREAM_ID;
-            }
-            break;
-
-        case (M4MP4W_maxChunkInter):
-
-            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
-
-            switch( streamIDvaluePtr->streamID )
-            {
-                case (0):                                       /*all streams*/
-                    mMp4FileDataPtr->InterleaveDur =
-                        (M4MP4W_Time32)streamIDvaluePtr->
-                        value; /*time conversion!*/
-                    break;
-
-                default:
-                    return M4ERR_BAD_STREAM_ID;
-                    /*not meaningfull to set this parameter on a streamID basis*/
-            }
-            break;
-
-        case (M4MP4W_maxFileSize):
-            mMp4FileDataPtr->MaxFileSize = *(M4OSA_UInt32 *)value;
-            break;
-
-        case (M4MP4W_embeddedString):
-            memAddrPtr = (M4MP4W_memAddr *)value;
-            /*
-            * If memAddrPtr->size > 16 bytes, then the string will be truncated.
-            * If memAddrPtr->size < 16 bytes, then return M4ERR_PARAMETER
-            */
-            ERR_CHECK(memAddrPtr->size >= 16, M4ERR_PARAMETER);
-
-            if (mMp4FileDataPtr->embeddedString == M4OSA_NULL)
-            {
-                mMp4FileDataPtr->embeddedString =
-                    (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(16, M4MP4_WRITER,
-                    (M4OSA_Char *)"embeddedString");
-                ERR_CHECK(mMp4FileDataPtr->embeddedString != M4OSA_NULL,
-                    M4ERR_ALLOC);
-            }
-            /*else, just overwrite the previously set string*/
-            memcpy((void *)mMp4FileDataPtr->embeddedString,
-                (void *)memAddrPtr->addr, 16);
-            break;
-
-        case (M4MP4W_integrationTag):
-            memAddrPtr = (M4MP4W_memAddr *)value;
-            /*
-            * If memAddrPtr->size > 60 bytes, then the string will be truncated.
-            * If memAddrPtr->size < 60 bytes, then pad with 0
-            */
-            if (mMp4FileDataPtr->integrationTag == M4OSA_NULL)
-            {
-                mMp4FileDataPtr->integrationTag =
-                    (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(60, M4MP4_WRITER,
-                    (M4OSA_Char *)"integrationTag");
-                ERR_CHECK(mMp4FileDataPtr->integrationTag != M4OSA_NULL,
-                    M4ERR_ALLOC);
-            }
-            /*else, just overwrite the previously set string*/
-            if (memAddrPtr->size < 60)
-            {
-                memcpy((void *)mMp4FileDataPtr->integrationTag,
-                    (void *)BlockSignatureSkipDefaultIntegrationTag,
-                    60);
-                memcpy((void *)mMp4FileDataPtr->integrationTag,
-                    (void *)memAddrPtr->addr, memAddrPtr->size);
-            }
-            else
-            {
-                memcpy((void *)mMp4FileDataPtr->integrationTag,
-                    (void *)memAddrPtr->addr, 60);
-            }
-            break;
-
-        case (M4MP4W_CamcoderVersion):
-
-            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
-
-            switch( streamIDvaluePtr->streamID )
-            {
-                case (0): /*all streams*/
-                    mMp4FileDataPtr->camcoderVersion = streamIDvaluePtr->value;
-                    break;
-
-                default:
-                    return M4ERR_BAD_STREAM_ID;
-                    /*not meaningfull to set this parameter on a streamID basis*/
-            }
-            break;
-
-        case (M4MP4W_preWriteCallBack):
-            return M4ERR_NOT_IMPLEMENTED;
-            /*mMp4FileDataPtr->PreWriteCallBack = *(M4MP4W_WriteCallBack*)value;
-            break;*/
-
-        case (M4MP4W_postWriteCallBack):
-            return M4ERR_NOT_IMPLEMENTED;
-            /*mMp4FileDataPtr->PostWriteCallBack = *(M4MP4W_WriteCallBack*)value;
-            break;*/
-
-        case (M4MP4W_maxAUsize):
-
-            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
-
-            switch( streamIDvaluePtr->streamID )
-            {
-                case (AudioStreamID):
-
-                    /*if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)*/
-                    if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
-                        return M4ERR_BAD_STREAM_ID;
-                    else
-                        mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
-                        streamIDvaluePtr->value;
-                    break;
-
-                case (VideoStreamID):
-
-                    /*if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)*/
-                    if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
-                        return M4ERR_BAD_STREAM_ID;
-                    else
-                        mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
-                        streamIDvaluePtr->value;
-                    break;
-
-                case (0): /*all streams*/
-
-                    mMp4FileDataPtr->MaxAUSize = streamIDvaluePtr->value;
-
-                    if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
-                        mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
-                        streamIDvaluePtr->value;
-
-                    if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
-                        mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
-                        streamIDvaluePtr->value;
-
-                    break;
-
-                default:
-                    return M4ERR_BAD_STREAM_ID;
-            }
-            break;
-
-        case (M4MP4W_IOD):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_ESD):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_SDP):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_trackSize):
-
-            streamIDsizePtr = (M4MP4W_StreamIDsize *)value;
-
-            if ((streamIDsizePtr->streamID != VideoStreamID)
-                || (mMp4FileDataPtr->hasVideo == M4OSA_FALSE))
-                return M4ERR_BAD_STREAM_ID;
-            else
-            {
-                mMp4FileDataPtr->videoTrackPtr->width = streamIDsizePtr->width;
-                mMp4FileDataPtr->videoTrackPtr->height =
-                    streamIDsizePtr->height;
-            }
-            break;
-
-        case (M4MP4W_estimateAudioSize):
-
-            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
-
-            /*shall not set this option before audio and video streams were added*/
-            /*nonsense to set this option if not in case audio+video*/
-            if ((mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
-                || (mMp4FileDataPtr->hasVideo == M4OSA_FALSE))
-                return M4ERR_STATE;
-
-            mMp4FileDataPtr->estimateAudioSize =
-                (M4OSA_Bool)streamIDvaluePtr->value;
-            break;
-
-        case (M4MP4W_MOOVfirst):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_V2_MOOF):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_V2_tblCompres):
-            return M4ERR_NOT_IMPLEMENTED;
-
-        case (M4MP4W_maxFileDuration):
-            mMp4FileDataPtr->MaxFileDuration = *(M4OSA_UInt32 *)value;
-            break;
-
-        case (M4MP4W_setFtypBox):
-            {
-                M4OSA_UInt32 size;
-
-                ERR_CHECK(( (M4MP4C_FtypBox *)value)->major_brand != 0,
-                    M4ERR_PARAMETER);
-
-                /* Copy structure */
-                mMp4FileDataPtr->ftyp = *(M4MP4C_FtypBox *)value;
-
-                /* Update global position variables with the difference between common and
-                 user block */
-                size =
-                    mMp4FileDataPtr->ftyp.nbCompatibleBrands * sizeof(M4OSA_UInt32);
-
-                mMp4FileDataPtr->absoluteCurrentPos = 8/*mdat*/ + 16 + size;
-                mMp4FileDataPtr->filesize = 218/*mdat+moov+skip*/ + 16 + size;
-            }
-            break;
-
-        case (M4MP4W_DSI):
-            {
-                streamIDmemAddrPtr = (M4SYS_StreamIDmemAddr *)value;
-
-                /* Nested switch! Whee! */
-                switch( streamIDmemAddrPtr->streamID )
-                {
-                    case (AudioStreamID):
-                        return M4ERR_NOT_IMPLEMENTED;
-
-                    case (VideoStreamID):
-
-                        /* Protect DSI setting : only once allowed on a given stream */
-
-                        switch( mMp4FileDataPtr->videoTrackPtr->
-                            CommonData.trackType )
-                        {
-                            case M4SYS_kH263:
-                                if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
-                                    || (M4OSA_NULL
-                                    != mMp4FileDataPtr->videoTrackPtr->DSI))
-                                {
-                                    M4OSA_TRACE1_0(
-                                        "M4MP4W_setOption: dsi already set !");
-                                    return M4ERR_STATE;
-                                }
-
-                                if ((0 == streamIDmemAddrPtr->size)
-                                    || (M4OSA_NULL == streamIDmemAddrPtr->addr))
-                                {
-                                    M4OSA_TRACE1_0(
-                                        "M4MP4W_setOption: Bad H263 dsi!");
-                                    return M4ERR_PARAMETER;
-                                }
-
-                                /*decoder specific info size is supposed to be always 7
-                                 bytes long */
-                                ERR_CHECK(streamIDmemAddrPtr->size == 7,
-                                    M4ERR_PARAMETER);
-                                mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                                    (M4OSA_UInt8)streamIDmemAddrPtr->size;
-                                mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
-                                    *)M4OSA_32bitAlignedMalloc(streamIDmemAddrPtr->size,
-                                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
-                                    != M4OSA_NULL, M4ERR_ALLOC);
-                                memcpy(
-                                    (void *)mMp4FileDataPtr->videoTrackPtr->
-                                    DSI,
-                                    (void *)streamIDmemAddrPtr->addr,
-                                    streamIDmemAddrPtr->size);
-
-                                break;
-
-                            case M4SYS_kMPEG_4:
-                                if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
-                                    || (M4OSA_NULL
-                                    != mMp4FileDataPtr->videoTrackPtr->DSI))
-                                {
-                                    M4OSA_TRACE1_0(
-                                        "M4MP4W_setOption: dsi already set !");
-                                    return M4ERR_STATE;
-                                }
-
-                                if ((0 == streamIDmemAddrPtr->size)
-                                    || (M4OSA_NULL == streamIDmemAddrPtr->addr))
-                                {
-                                    M4OSA_TRACE1_0(
-                                        "M4MP4W_setOption: Bad MPEG4 dsi!");
-                                    return M4ERR_PARAMETER;
-                                }
-
-                                /*MP4V specific*/
-                                ERR_CHECK(streamIDmemAddrPtr->size < 105,
-                                    M4ERR_PARAMETER);
-                                mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                                    (M4OSA_UInt8)streamIDmemAddrPtr->size;
-                                mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
-                                    *)M4OSA_32bitAlignedMalloc(streamIDmemAddrPtr->size,
-                                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
-                                    != M4OSA_NULL, M4ERR_ALLOC);
-                                memcpy(
-                                    (void *)mMp4FileDataPtr->videoTrackPtr->
-                                    DSI,
-                                    (void *)streamIDmemAddrPtr->addr,
-                                    streamIDmemAddrPtr->size);
-                                mMp4FileDataPtr->filesize +=
-                                    streamIDmemAddrPtr->size;
-
-                                break;
-
-                            case M4SYS_kH264:
-                                if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
-                                    || (M4OSA_NULL
-                                    != mMp4FileDataPtr->videoTrackPtr->DSI))
-                                {
-                                    /* + H.264 trimming */
-                                    if (M4OSA_TRUE == mMp4FileDataPtr->bMULPPSSPS)
-                                    {
-                                        free(mMp4FileDataPtr->videoTrackPtr->DSI);
-
-                                        // Do not strip the DSI
-                                        /* Store the DSI size */
-                                        mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                                            (M4OSA_UInt8)streamIDmemAddrPtr->size;
-                                             M4OSA_TRACE1_1("M4MP4W_setOption: in set option DSI size =%d"\
-                                            ,mMp4FileDataPtr->videoTrackPtr->dsiSize);
-                                        /* Copy the DSI (SPS + PPS) */
-                                        mMp4FileDataPtr->videoTrackPtr->DSI =
-                                            (M4OSA_UChar*)M4OSA_32bitAlignedMalloc(
-                                            streamIDmemAddrPtr->size, M4MP4_WRITER,
-                                            (M4OSA_Char *)"videoTrackPtr->DSI");
-                                        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI !=
-                                             M4OSA_NULL, M4ERR_ALLOC);
-                                        memcpy(
-                                            (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
-                                            (void *)streamIDmemAddrPtr->addr,
-                                            streamIDmemAddrPtr->size);
-
-                                        break;
-                                        /* - H.264 trimming */
-                                    }
-                                    else
-                                    {
-                                        M4OSA_TRACE1_0(
-                                            "M4MP4W_setOption: dsi already set !");
-                                        return M4ERR_STATE;
-                                    }
-                                }
-
-                                if (( 0 == streamIDmemAddrPtr->size)
-                                    || (M4OSA_NULL == streamIDmemAddrPtr->addr))
-                                {
-                                    M4OSA_TRACE1_0(
-                                        "M4MP4W_setOption: Bad H264 dsi!");
-                                    return M4ERR_PARAMETER;
-                                }
-
-                                /* Store the DSI size */
-                                mMp4FileDataPtr->videoTrackPtr->dsiSize =
-                                    (M4OSA_UInt8)streamIDmemAddrPtr->size;
-
-                                /* Copy the DSI (SPS + PPS) */
-                                mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
-                                    *)M4OSA_32bitAlignedMalloc(streamIDmemAddrPtr->size,
-                                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
-                                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
-                                    != M4OSA_NULL, M4ERR_ALLOC);
-                                memcpy(
-                                    (void *)mMp4FileDataPtr->videoTrackPtr->
-                                    DSI,
-                                    (void *)streamIDmemAddrPtr->addr,
-                                    streamIDmemAddrPtr->size);
-                                break;
-
-                            default:
-                                return M4ERR_BAD_STREAM_ID;
-                        }
-                    break;
-
-                default:
-                    return M4ERR_BAD_STREAM_ID;
-                }
-            }
-            break;
-            /* H.264 Trimming  */
-        case M4MP4W_MUL_PPS_SPS:
-            mMp4FileDataPtr->bMULPPSSPS = *(M4OSA_Int8 *)value;
-            /* H.264 Trimming  */
-            break;
-
-        default:
-            return M4ERR_BAD_OPTION_ID;
-    }
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_getState( M4OSA_Context context, M4MP4W_State *state,
-                          M4SYS_StreamID streamID )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    switch( streamID )
-    {
-        case (0):
-            *state = mMp4FileDataPtr->state;
-            break;
-
-        case (AudioStreamID):
-            if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
-            {
-                *state = mMp4FileDataPtr->audioTrackPtr->microState;
-            }
-            else
-            {
-                return M4ERR_BAD_STREAM_ID;
-            }
-            break;
-
-        case (VideoStreamID):
-            if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
-            {
-                *state = mMp4FileDataPtr->videoTrackPtr->microState;
-            }
-            else
-            {
-                return M4ERR_BAD_STREAM_ID;
-            }
-            break;
-
-        default:
-            return M4ERR_BAD_STREAM_ID;
-    }
-
-    return err;
-}
-
-/*******************************************************************************/
-M4OSA_ERR M4MP4W_getCurrentFileSize( M4OSA_Context context,
-                                    M4OSA_UInt32 *pCurrentFileSize )
-/*******************************************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
-    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
-
-    ERR_CHECK(pCurrentFileSize != M4OSA_NULL, M4ERR_PARAMETER);
-    *pCurrentFileSize = mMp4FileDataPtr->filesize;
-
-    return err;
-}
-
-#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
diff --git a/libvideoeditor/vss/3gpwriter/src/MODULE_LICENSE_APACHE2 b/libvideoeditor/vss/3gpwriter/src/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/vss/3gpwriter/src/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/vss/3gpwriter/src/NOTICE b/libvideoeditor/vss/3gpwriter/src/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/vss/3gpwriter/src/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/libvideoeditor/vss/Android.mk b/libvideoeditor/vss/Android.mk
deleted file mode 100755
index 1d4ec7f..0000000
--- a/libvideoeditor/vss/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
\ No newline at end of file
diff --git a/libvideoeditor/vss/common/inc/From2iToMono_16.h b/libvideoeditor/vss/common/inc/From2iToMono_16.h
deleted file mode 100755
index c6ffb3f..0000000
--- a/libvideoeditor/vss/common/inc/From2iToMono_16.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _FROM2ITOMONO_16_H_
-#define _FROM2ITOMONO_16_H_
-
-
-void From2iToMono_16(  const short *src,
-                             short *dst,
-                             short n);
-
-/**********************************************************************************/
-
-#endif  /* _FROM2ITOMONO_16_H_ */
-
-/**********************************************************************************/
-
diff --git a/libvideoeditor/vss/common/inc/LVM_Types.h b/libvideoeditor/vss/common/inc/LVM_Types.h
deleted file mode 100755
index a28974d..0000000
--- a/libvideoeditor/vss/common/inc/LVM_Types.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/****************************************************************************************
- * @file name:          LVM_Types.h
-
-*****************************************************************************************/
-
-/****************************************************************************************/
-/*                                                                                      */
-/*  Header file defining the standard LifeVibes types for use in the application layer  */
-/*  interface of all LifeVibes modules                                                  */
-/*                                                                                      */
-/****************************************************************************************/
-
-#ifndef LVM_TYPES_H
-#define LVM_TYPES_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/*  definitions                                                                         */
-/*                                                                                      */
-/****************************************************************************************/
-
-#define LVM_NULL                0                   /* NULL pointer */
-
-#define LVM_TRUE                1                   /* Booleans */
-#define LVM_FALSE               0
-
-#define LVM_MAXINT_8            127                 /* Maximum positive integer size */
-#define LVM_MAXINT_16           32767
-#define LVM_MAXINT_32           2147483647
-#define LVM_MAXENUM             2147483647
-
-#define LVM_MODULEID_MASK       0xFF00              /* Mask to extract the calling module ID
-                                                        from callbackId */
-#define LVM_EVENTID_MASK        0x00FF              /* Mask to extract the callback event from
-                                                         callbackId */
-
-/* Memory table*/
-#define LVM_MEMREGION_PERSISTENT_SLOW_DATA      0   /* Offset to the instance memory region */
-#define LVM_MEMREGION_PERSISTENT_FAST_DATA      1   /* Offset to the persistent data memory
-                                                        region */
-#define LVM_MEMREGION_PERSISTENT_FAST_COEF      2   /* Offset to the persistent coefficient
-                                                        memory region */
-#define LVM_MEMREGION_TEMPORARY_FAST            3   /* Offset to temporary memory region */
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/*  Basic types                                                                         */
-/*                                                                                      */
-/****************************************************************************************/
-
-typedef     char                LVM_CHAR;           /* ASCII character */
-
-typedef     char                LVM_INT8;           /* Signed 8-bit word */
-typedef     unsigned char       LVM_UINT8;          /* Unsigned 8-bit word */
-
-typedef     short               LVM_INT16;          /* Signed 16-bit word */
-typedef     unsigned short      LVM_UINT16;         /* Unsigned 16-bit word */
-
-typedef     long                LVM_INT32;          /* Signed 32-bit word */
-typedef     unsigned long       LVM_UINT32;         /* Unsigned 32-bit word */
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/*  Standard Enumerated types                                                           */
-/*                                                                                      */
-/****************************************************************************************/
-
-/* Operating mode */
-typedef enum
-{
-    LVM_MODE_OFF    = 0,
-    LVM_MODE_ON     = 1,
-    LVM_MODE_DUMMY  = LVM_MAXENUM
-} LVM_Mode_en;
-
-
-/* Format */
-typedef enum
-{
-    LVM_STEREO          = 0,
-    LVM_MONOINSTEREO    = 1,
-    LVM_MONO            = 2,
-    LVM_SOURCE_DUMMY    = LVM_MAXENUM
-} LVM_Format_en;
-
-
-/* Word length */
-typedef enum
-{
-    LVM_16_BIT      = 0,
-    LVM_32_BIT      = 1,
-    LVM_WORDLENGTH_DUMMY = LVM_MAXENUM
-} LVM_WordLength_en;
-
-
-/* LVM sampling rates */
-typedef enum
-{
-    LVM_FS_8000  = 0,
-    LVM_FS_11025 = 1,
-    LVM_FS_12000 = 2,
-    LVM_FS_16000 = 3,
-    LVM_FS_22050 = 4,
-    LVM_FS_24000 = 5,
-    LVM_FS_32000 = 6,
-    LVM_FS_44100 = 7,
-    LVM_FS_48000 = 8,
-    LVM_FS_INVALID = LVM_MAXENUM-1,
-    LVM_FS_DUMMY = LVM_MAXENUM
-} LVM_Fs_en;
-
-
-/* Memory Types */
-typedef enum
-{
-    LVM_PERSISTENT_SLOW_DATA    = LVM_MEMREGION_PERSISTENT_SLOW_DATA,
-    LVM_PERSISTENT_FAST_DATA    = LVM_MEMREGION_PERSISTENT_FAST_DATA,
-    LVM_PERSISTENT_FAST_COEF    = LVM_MEMREGION_PERSISTENT_FAST_COEF,
-    LVM_TEMPORARY_FAST          = LVM_MEMREGION_TEMPORARY_FAST,
-    LVM_MEMORYTYPE_DUMMY        = LVM_MAXENUM
-} LVM_MemoryTypes_en;
-
-
-/* Memory region definition */
-typedef struct
-{
-    LVM_UINT32                  Size;                   /* Region size in bytes */
-    LVM_MemoryTypes_en          Type;                   /* Region type */
-    void                        *pBaseAddress;          /* Pointer to the region base address */
-} LVM_MemoryRegion_st;
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/*  Standard Function Prototypes                                                        */
-/*                                                                                      */
-/****************************************************************************************/
-typedef LVM_INT32 (*LVM_Callback)(void          *pCallbackData,     /* Pointer to the callback
-                                                                     data structure */
-                                  void          *pGeneralPurpose,   /* General purpose pointer
-                                                                    (e.g. to a data structure
-                                                                    needed in the callback) */
-                                  LVM_INT16     GeneralPurpose );   /* General purpose variable
-                                  (e.g. to be used as callback ID) */
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/*  End of file                                                                         */
-/*                                                                                      */
-/****************************************************************************************/
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif  /* LVM_TYPES_H */
diff --git a/libvideoeditor/vss/common/inc/M4AD_Common.h b/libvideoeditor/vss/common/inc/M4AD_Common.h
deleted file mode 100755
index f6e596d..0000000
--- a/libvideoeditor/vss/common/inc/M4AD_Common.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @fil        M4AD_Common.h
- * @brief    Audio Shell Decoder common interface declaration
- * @note    This file declares the common interfaces that audio decoder shells must implement
- ************************************************************************
-*/
-#ifndef __M4AD_COMMON_H__
-#define __M4AD_COMMON_H__
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_OptionID.h"
-#include "M4OSA_CoreID.h"
-#include "M4DA_Types.h"
-#include "M4TOOL_VersionInfo.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-typedef M4OSA_Void* M4AD_Context;
-
-/**
- ************************************************************************
- * enum     M4AD_OptionID
- * @brief    This enum defines the Audio decoder options.
- * @note    These options can be read from or written to a decoder via
- *            M4AD_getOption_fct/M4AD_setOption_fct
- ************************************************************************
-*/
-typedef enum
-{
-    /**
-     * Set the flag of presence of protection */
-    M4AD_kOptionID_ProtectionAbsent = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x01),
-
-    /**
-     * Set the number of frames per bloc */
-    M4AD_kOptionID_NbFramePerBloc    = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x02),
-
-    /**
-     * Set the AAC decoder user parameters */
-    M4AD_kOptionID_UserParam        = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x03),
-
-
-    /**
-     * Get the AAC steam type */
-    M4AD_kOptionID_StreamType        = M4OSA_OPTION_ID_CREATE(M4_READ , M4DECODER_AUDIO, 0x10),
-
-    /**
-     * Get the number of used bytes in the latest decode
-     (used only when decoding AAC from ADIF file) */
-    M4AD_kOptionID_UsedBytes        = M4OSA_OPTION_ID_CREATE(M4_READ , M4DECODER_AUDIO, 0x11),
-
-    /* Reader Interface */
-    M4AD_kOptionID_3gpReaderInterface = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x012),
-
-    /* Audio Access Unit */
-    M4AD_kOptionID_AudioAU = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x13),
-
-    /* Reader error code */
-    M4AD_kOptionID_GetAudioAUErrCode = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x14),
-
-    /* Number of channels */
-    M4AD_kOptionID_AudioNbChannels = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x15),
-
-    /* Sampling frequency */
-    M4AD_kOptionID_AudioSampFrequency = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x16),
-
-    /* Audio AU CTS */
-    M4AD_kOptionID_AuCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x17)
-
-} M4AD_OptionID;
-
-
-
-typedef enum
-{
-    M4_kUnknown = 0,    /* Unknown stream type */
-    M4_kAAC,            /* M4_kAAC_MAIN or M4_kAAC_LC or M4_kAAC_SSR or M4_kAAC_LTP    */
-    M4_kAACplus,        /* Decoder type is AAC plus */
-    M4_keAACplus        /* Decoder type is enhanced AAC plus */
-} M4_AACType;
-
-/**
- ************************************************************************
- * enum     M4AD_Type
- * @brief    This enum defines the audio types used to create decoders
- * @note    This enum is used internally by the VPS to identify a currently supported
- *            audio decoder interface. Each decoder is registered with one of this type associated.
- *            When a decoder instance is needed, this type is used to identify
- *            and retrieve its interface.
- ************************************************************************
-*/
-typedef enum
-{
-    M4AD_kTypeAMRNB = 0,
-    M4AD_kTypeAMRWB,
-    M4AD_kTypeAAC,
-    M4AD_kTypeMP3,
-    M4AD_kTypePCM,
-    M4AD_kTypeBBMusicEngine,
-    M4AD_kTypeWMA,
-    M4AD_kTypeRMA,
-    M4AD_kTypeADPCM,
-    M4AD_kType_NB  /* number of decoders, keep it as last enum entry */
-
-} M4AD_Type ;
-
-
-
-/**
- ************************************************************************
- * structure    M4AD_Buffer
- * @brief        Structure to describe a buffer
- ************************************************************************
-*/
-typedef struct
-{
-    M4OSA_MemAddr8    m_dataAddress;
-    M4OSA_UInt32    m_bufferSize;
-    int64_t         m_timeStampUs;
-} M4AD_Buffer;
-
-/**
- ************************************************************************
- * @brief    Creates an instance of the decoder
- * @note    Allocates the context
- *
- * @param    pContext:        (OUT)    Context of the decoder
- * @param    pStreamHandler:    (IN)    Pointer to an audio stream description
- * @param    pUserData:        (IN)    Pointer to User data
- *
- * @return    M4NO_ERROR                 there is no error
- * @return  M4ERR_STATE             State automaton is not applied
- * @return    M4ERR_ALLOC                a memory allocation has failed
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
-*/
-
-typedef M4OSA_ERR  (M4AD_create_fct)(M4AD_Context *pContext,
-                                     M4_AudioStreamHandler *pStreamHandler, void* pUserData);
-
-
-/**
- ************************************************************************
- * @brief    Destroys the instance of the decoder
- * @note    After this call the context is invalid
- *
- * @param    context:    (IN)    Context of the decoder
- *
- * @return    M4NO_ERROR             There is no error
- * @return  M4ERR_PARAMETER     The context is invalid (in DEBUG only)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4AD_destroy_fct)    (M4AD_Context context);
-
-/**
- ************************************************************************
- * @brief   Decodes the given audio data
- * @note    Parses and decodes the next audio frame, from the given buffer.
- *            This function changes pInputBufferSize value according to the amount
- *            of data actually read.
- *
- * @param    context:            (IN)    Context of the decoder
- * @param    inputBuffer:        (IN/OUT)Input Data buffer. It contains at least one audio frame.
- *                                       The size of the buffer must be updated inside the
- *                                       function to reflect the size of the actually decoded data.
- *                                       (e.g. the first frame in pInputBuffer)
- * @param   decodedPCMBuffer:    (OUT)   Output PCM buffer (decoded data).
- * @param   jumping:            (IN)    M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4AD_step_fct)    (M4AD_Context context, M4AD_Buffer *pInputBuffer,
-                                     M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping);
-
-/**
- ************************************************************************
- * @brief    Gets the decoder version
- * @note    The version is given in a M4_VersionInfo structure
- *
- * @param    pValue:        (OUT)        Pointer to the version structure
- *
- * @return    M4NO_ERROR                 there is no error
- * @return  M4ERR_PARAMETER         The given pointer is null (in DEBUG only)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4AD_getVersion_fct)(M4_VersionInfo* pVersionInfo);
-
-
-/**
- ************************************************************************
- * @brief    This function creates the AAC core decoder according to
- *            the stream properties and to the options that may
- *            have been set using M4AD_setOption_fct
- * @note    Creates an instance of the AAC decoder
- * @note    This function is used especially by the AAC decoder
- *
- * @param    pContext:        (IN/OUT)    Context of the decoder
- * @param    pStreamHandler:    (IN)    Pointer to an audio stream description
- *
- * @return    M4NO_ERROR                 there is no error
- * @return  M4ERR_STATE             State automaton is not applied
- * @return    M4ERR_ALLOC                a memory allocation has failed
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4AD_start_fct)    (M4AD_Context pContext);
-
-/**
- ************************************************************************
- * @brief    Reset the instance of the decoder
- *
- * @param    context:    (IN)    Context of the decoder
- *
- * @return    M4NO_ERROR             There is no error
- * @return  M4ERR_PARAMETER     The context is invalid (in DEBUG only)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4AD_reset_fct)    (M4AD_Context context);
-
-
-/**
- ************************************************************************
- * @brief   set en option value of the audio decoder
- *
- * @param    context:        (IN)    Context of the decoder
- * @param    optionId:        (IN)    indicates the option to set
- * @param    pValue:            (IN)    pointer to structure or value (allocated by user)
- *                                  where option is stored
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4AD_setOption_fct) (M4AD_Context context,
-                                         M4OSA_OptionID optionId, M4OSA_DataOption pValue);
-
-/**
- ************************************************************************
- * @brief   Get en option value of the audio decoder
- *
- * @param    context:        (IN)    Context of the decoder
- * @param    optionId:        (IN)    indicates the option to set
- * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
- *                                  where option is stored
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4AD_getOption_fct) (M4AD_Context context, M4OSA_OptionID optionId,
-                                         M4OSA_DataOption pValue);
-/**
- ************************************************************************
- * structure    M4AD_Interface
- * @brief        This structure defines the generic audio decoder interface
- * @note        This structure stores the pointers to functions of one audio decoder type.
- *                The decoder type is one of the M4AD_Type
- ************************************************************************
-*/
-typedef struct _M4AD_Interface
-{
-
-    M4AD_create_fct*        m_pFctCreateAudioDec;
-    M4AD_start_fct*            m_pFctStartAudioDec;
-    M4AD_step_fct*            m_pFctStepAudioDec;
-    M4AD_getVersion_fct*    m_pFctGetVersionAudioDec;
-    M4AD_destroy_fct*        m_pFctDestroyAudioDec;
-    M4AD_reset_fct*            m_pFctResetAudioDec;
-    M4AD_setOption_fct*        m_pFctSetOptionAudioDec;
-    M4AD_getOption_fct*        m_pFctGetOptionAudioDec;
-
-} M4AD_Interface;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*__M4AD_COMMON_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4AD_Null.h b/libvideoeditor/vss/common/inc/M4AD_Null.h
deleted file mode 100755
index 78140cd..0000000
--- a/libvideoeditor/vss/common/inc/M4AD_Null.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
-*************************************************************************
- * @file    M4AD_Null.h
- * @brief    Implementation of the decoder public interface that do nothing
- * @note    This file defines the getInterface function.
-*************************************************************************
-*/
-#ifndef __M4AD_NULL_H__
-#define __M4AD_NULL_H__
-
-#include "M4AD_Common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- ************************************************************************
- * @brief Retrieves the interface implemented by the decoder
- * @param pDecoderType        : pointer on an M4AD_Type (allocated by the caller)
- *                              that will be filled with the decoder type supported by this decoder
- * @param pDecoderInterface   : address of a pointer that will be set to the interface implemented
- *                              by this decoder. The interface is a structure allocated by the
- *                              function and must be un-allocated by the caller.
- *
- * @return : M4NO_ERROR  if OK
- *           M4ERR_ALLOC if allocation failed
- ************************************************************************
-*/
-M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4AD_NULL_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4AIR_API.h b/libvideoeditor/vss/common/inc/M4AIR_API.h
deleted file mode 100755
index 7541362..0000000
--- a/libvideoeditor/vss/common/inc/M4AIR_API.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file   M4AIR_API.h
- * @brief  Area of Interest Resizer  API
- * @note
-*************************************************************************
-*/
-#ifndef M4AIR_API_H
-#define M4AIR_API_H
-
-/******************************* INCLUDES *******************************/
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_CoreID.h"
-#include "M4OSA_Mutex.h"
-#include "M4OSA_Memory.h"
-#include "M4VIFI_FiltersAPI.h"
-#include "M4Common_types.h"
-
-/************************ M4AIR TYPES DEFINITIONS ***********************/
-
-/**
- ******************************************************************************
- * enum        M4AIR_InputFormatType
- * @brief     The following enumeration lists the different accepted format for the AIR.
- * To be available, the associated compilation flag must be defined, else,
- * the AIR will return an error (compilation flag : M4AIR_XXXXXX_FORMAT_SUPPORTED).
- ******************************************************************************
-*/
-typedef enum
-{
-    M4AIR_kYUV420P,
-    M4AIR_kYUV420AP,
-    M4AIR_kJPG
-}M4AIR_InputFormatType ;
-
-
-/**
- ******************************************************************************
- * struct         M4AIR_Coordinates
- * @brief     The following structure is used to retrieve X and Y coordinates in a given picture.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt32    m_x;    /**< X coordinate */
-    M4OSA_UInt32    m_y;    /**< Y coordinate */
-}M4AIR_Coordinates;
-
-
-/**
- ******************************************************************************
- * struct         M4AIR_Size
- * @brief     The following structure is used to retrieve the dimension of a given picture area.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt32    m_width;    /**< Width */
-    M4OSA_UInt32    m_height;    /**< Height */
-}M4AIR_Size;
-
-
-/**
- ******************************************************************************
- * struct         M4AIR_Params
- * @brief     The following structure is used to retrieve the parameters needed to get a resized ROI (Region of interest).
- ******************************************************************************
-*/
-typedef struct
-{
-    M4AIR_Coordinates        m_inputCoord;            /**< X and Y positionning in the input of the first interesting pixel (top-left) */
-    M4AIR_Size                m_inputSize;            /**< Size of the interesting area inside input (width and height)*/
-    M4AIR_Size                m_outputSize;            /**< Size of the output */
-    M4OSA_Bool                m_bOutputStripe;            /**< Flag to know if we will have to provide output per stripe or not */
-    M4COMMON_Orientation        m_outputOrientation;    /**< Desired orientation of the AIR output */
-}M4AIR_Params;
-
-
-
-
-/*********************** M4AIR ERRORS DEFINITIONS **********************/
-
-/* This error means that the requested video format is not supported. */
-#define M4ERR_AIR_FORMAT_NOT_SUPPORTED    M4OSA_ERR_CREATE(M4_ERR,M4AIR,0x000001)
-
-/* This error means that the input or output size is incorrect */
-#define M4ERR_AIR_ILLEGAL_FRAME_SIZE    M4OSA_ERR_CREATE(M4_ERR,M4AIR,0x000002)
-
-
-
-/********************** M4AIR PUBLIC API DEFINITIONS ********************/
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat);
- * @brief        This function initialize an instance of the AIR.
- * @param    pContext:    (IN/OUT) Address of the context to create
- * @param    inputFormat:    (IN) input format type.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
- * @return    M4ERR_ALLOC: No more memory is available
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat);
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
- * @brief        This function destroys an instance of the AIR component
- * @param    pContext:    (IN) Context identifying the instance to destroy
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- * @return    M4ERR_STATE: Internal state is incompatible with this function call.
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext);
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
- * @brief    This function will configure the AIR.
- * @note    It will set the input and output coordinates and sizes,
- *            and indicates if we will proceed in stripe or not.
- *            In case a M4AIR_get in stripe mode was on going, it will cancel this previous
- *            processing and reset the get process.
- * @param    pContext:                (IN) Context identifying the instance
- * @param    pParams->m_bOutputStripe:(IN) Stripe mode.
- * @param    pParams->m_inputCoord:    (IN) X,Y coordinates of the first valid pixel in input.
- * @param    pParams->m_inputSize:    (IN) input ROI size.
- * @param    pParams->m_outputSize:    (IN) output size.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_ALLOC: No more memory space to add a new effect.
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- * @return    M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams);
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
- * @brief    This function will provide the requested resized area of interest according to
- *            settings provided in M4AIR_configure.
- * @note    In case the input format type is JPEG, input plane(s)
- *            in pIn is not used. In normal mode, dimension specified in output plane(s) structure
- *            must be the same than the one specified in M4AIR_configure. In stripe mode, only
- *            the width will be the same, height will be taken as the stripe height (typically 16).
- *            In normal mode, this function is call once to get the full output picture. In stripe
- *            mode, it is called for each stripe till the whole picture has been retrieved,and
- *            the position of the output stripe in the output picture is internally incremented
- *            at each step.
- *            Any call to M4AIR_configure during stripe process will reset this one to the
- *              beginning of the output picture.
- * @param    pContext:    (IN) Context identifying the instance
- * @param    pIn:            (IN) Plane structure containing input Plane(s).
- * @param    pOut:        (IN/OUT)  Plane structure containing output Plane(s).
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_ALLOC: No more memory space to add a new effect.
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- ******************************************************************************
-*/
-M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut);
-
-
-
-#endif /* M4AIR_API_H */
diff --git a/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h b/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h
deleted file mode 100755
index 9d710f3..0000000
--- a/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file        M4AMRR_CoreReader.h
- * @brief        Implementation of AMR parser
- * @note        This file contains the API def. for AMR Parser.
- ******************************************************************************
-*/
-#ifndef __M4AMR_COREREADER_H__
-#define __M4AMR_COREREADER_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "M4OSA_Types.h"
-#include "M4OSA_FileReader.h"
-#include "M4SYS_Stream.h"
-#include "M4SYS_AccessUnit.h"
-#include "M4OSA_Time.h"
-#include "M4TOOL_VersionInfo.h"
-
-/**
- ******************************************************************************
- * AMR reader Errors & Warnings definition
- ******************************************************************************
-*/
-#define M4ERR_AMR_INVALID_FRAME_TYPE    M4OSA_ERR_CREATE(M4_ERR,M4AMR_READER, 0x000001)
-#define M4ERR_AMR_NOT_COMPLIANT    M4OSA_ERR_CREATE(M4_ERR,M4AMR_READER, 0x000002)
-
-/**
- ******************************************************************************
- * enumeration    M4AMRR_State
- * @brief        This enum defines the AMR reader states
- * @note        These states are used internaly, but can be retrieved from outside the reader.
- ******************************************************************************
-*/
-typedef enum{
-    M4AMRR_kOpening    = 0x0100,
-    M4AMRR_kOpened    = 0x0101,
-    M4AMRR_kReading = 0x0200,
-    M4AMRR_kReading_nextAU = 0x0201,
-    M4AMRR_kClosed = 0x300
-}M4AMRR_State;
-
-/**
-*******************************************************************************
-* M4OSA_ERR M4AMRR_openRead (M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
-*                               M4OSA_FileReaderPointer* pFileFunction);
-* @brief    M4AMRR_OpenRead parses the meta data of the AMR and allocates data structure
-* @note        This function opens the file and creates a context for AMR  Parser.
-*            - sets context to null if error occured.
-* @param    pContext(OUT)        : AMR Reader context allocated in the function
-* @param    pFileDesscriptor(IN): File descriptor of the input file
-* @param    pFileFunction(IN)    : pointer to file function for file access
-*
-* @returns    M4NO_ERROR        : There is no error
-* @returns    M4ERR_PARAMETER    : pContext and/or pFileDescriptor is NULL
-* @returns    M4ERR_ALLOC        : Memory allocation failed
-* @returns    M4ERR_FILE_NOT_FOUND : file cannot be found
-* @returns    M4AMRR_ERR_AMR_NOT_COMPLIANT : Tthe input is not a AMR file
-* @returns    M4OSA_FILE        : See OSAL file Spec. for details.
-*******************************************************************************
-*/
-M4OSA_ERR M4AMRR_openRead (M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
-                            M4OSA_FileReadPointer* pFileFunction);
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc );
-* @brief    Reads the next available stream in the file
-* @note        Get the stream description of the stream.
-*            - This function assumes that there is only one stream in AMR file.
-* @param    Context(IN/OUT)    : AMR Reader context
-* @param    pStreamDesc(OUT): Description of the next read stream
-*
-* @returns     M4NO_ERROR        : There is no error
-* @returns     M4ERR_PARAMETER    : atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_ALLOC        : Memory allocation failed
-* @returns     M4ERR_STATE        : this function cannot be called in this state.
-* @returns     M4AMRR_WAR_NO_MORE_STREAM : There are no more streams in the file.
-******************************************************************************
-*/
-
-M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc );
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs );
-* @brief    Prepares the AMR reading of the specified stream Ids
-* @note        This function changes the state of the reader reading.
-* @param    Context(IN/OUT)    : AMR Reader context
-* @param    pStreamIDs(IN)    : Array of stream Ids to be prepared.
-*
-* @returns     M4NO_ERROR        : There is no error
-* @returns     M4ERR_PARAMETER    : atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_ALLOC        : Memory allocation failed
-* @returns     M4ERR_STATE        : this function cannot be called in this state.
-* @returns     M4ERR_BAD_STREAM_ID    : Atleast one of the stream Id. does not exist.
-******************************************************************************
-*/
-M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs );
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
-* @brief    Reads the access unit into the providing stream
-* @note        This function allocates the memory to dataAddress filed and copied the data.
-*            -The Application should not free the dataAddress pointer.
-* @param    Context(IN/OUT)    : AMR Reader context
-* @param    StreamID(IN)    : Selects the stream
-* @param    pAu(IN/OUT)        : Access Unit
-*
-* @returns    M4NO_ERROR        : There is no error
-* @returns     M4ERR_PARAMETER    : atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_ALLOC        : Memory allocation failed
-* @returns     M4ERR_STATE        : this function cannot be called in this state.
-* @returns     M4ERR_BAD_STREAM_ID    : Atleast one of the stream Id. does not exist.
-* @returns     M4WAR_NO_DATA_YET    : there    is no enough data on the stream for new access unit
-* @returns     M4WAR_END_OF_STREAM    : There are no more access unit in the stream
-* @returns     M4AMRR_ERR_INVALID_FRAME_TYPE : current frame has no valid frame type.
-******************************************************************************
-*/
-M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
-* @brief    Notify the ARM Reader that application will no longer use "AU"
-* @note        This function frees the memory pointed by pAu->dataAddress pointer
-*            -Changes the state of the reader back to reading.
-* @param    Context(IN/OUT)    : AMR Reader context
-* @param    StreamID(IN)    : Selects the stream
-* @param    pAu(IN)            : Access Unit
-*
-* @returns     M4NO_ERROR        : There is no error
-* @returns     M4ERR_PARAMETER    : atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_ALLOC        : Memory allocation failed
-* @returns     M4ERR_STATE        : this function cannot be called in this state.
-* @returns     M4ERR_BAD_STREAM_ID    : Atleast one of the stream Id. does not exist.
-******************************************************************************
-*/
-M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
-*                        M4SYS_seekAccessMode    seekMode, M4OSA_Time* pObtainCTS);
-* @brief    The function seeks the targeted time in the give stream by streamId.
-* @note        Each frame is of 20 ms duration,, builds the seek table and points
-*            the file pointer to starting for the required AU.
-* @param    Context(IN/OUT)    : AMR Reader context
-* @param    StreamID(IN)    : Array of stream IDs.
-* @param    time(IN)        : targeted time
-* @param    seekMode(IN)    : Selects the seek mode
-* @param    pObtainCTS(OUT)    : Returned time nearest to target.
-*
-* @returns     M4NO_ERROR        : There is no error
-* @returns     M4ERR_PARAMETER    : atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_ALLOC        : Memory allocation failed
-* @returns     M4ERR_STATE        : this function cannot be called in this state.
-* @returns     M4ERR_BAD_STREAM_ID    : Atleast one of the stream Id. does not exist.
-* @returns     M4WAR_INVALID_TIME    : time cannot be reached.
-******************************************************************************
-*/
-M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
-                         M4SYS_SeekAccessMode    seekMode, M4OSA_Time* pObtainCTS);
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context);
-* @brief    AMR reader closes the file
-* @param    Context(IN?OUT)    : AMR Reader context
-* @returns     M4NO_ERROR        : There is no error
-* @returns     M4ERR_PARAMETER    : atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_ALLOC        : Memory allocation failed
-* @returns     M4ERR_STATE        : this function cannot be called in this state.
-******************************************************************************
-*/
-M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context);
-
-/**
-******************************************************************************
-* M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId);
-* @brief    Gets the current state of the AMR reader
-* @param    Context(IN/OUT)    : AMR Reader context
-* @param    pState(OUT)        : Core AMR reader state
-* @param    streamId(IN)    : Selects the stream 0 for all
-*
-* @returns     M4NO_ERROR            :    There is no error
-* @returns     M4ERR_PARAMETER        :    atleast one parament is NULL
-* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
-* @returns     M4ERR_BAD_STREAM_ID    :    Atleast one of the stream Id. does not exist.
-******************************************************************************
-*/
-M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId);
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion)
- * @brief    Gets the current version of the AMR reader
- * @param    version(OUT)    : the structure that stores the version numbers
- *
- * @returns     M4NO_ERROR            :    There is no error
- * @returns     M4ERR_PARAMETER        :    version is NULL
- ******************************************************************************
-*/
-M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AMRR_getmaxAUsize    (M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
- * @brief    Computes the maximum access unit size of a stream
- *
- * @param    Context        (IN)  Context of the reader
- * @param    pMaxAuSize    (OUT) Maximum Access Unit size in the stream
- *
- * @return    M4NO_ERROR: No error
- * @return    M4ERR_PARAMETER: One of the input pointer is M4OSA_NULL (Debug only)
- ******************************************************************************
-*/
-M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize);
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus*/
-#endif /*__M4AMR_COREREADER_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4Common_types.h b/libvideoeditor/vss/common/inc/M4Common_types.h
deleted file mode 100755
index 9e6a0fb..0000000
--- a/libvideoeditor/vss/common/inc/M4Common_types.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4Common_Types.h
- * @brief  defines common structures
- * @note
- *
- ************************************************************************
-*/
-#ifndef M4COMMON_TYPES_H
-#define M4COMMON_TYPES_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-
-/**
- ************************************************************************
- * structure M4COMMON_MetadataType
- ************************************************************************
-*/
-typedef enum
-{
-    M4COMMON_kUnknownMetaDataType,
-    /* Local files */
-    M4COMMON_kTagID3v1,                /**<  Metadata from TAG ID3 V1 */
-    M4COMMON_kTagID3v2,                /**<  Metadata from TAG ID3 V2 */
-    M4COMMON_kASFContentDesc,        /**<  Metadata from ASF content description  */
-
-    M4COMMON_k3GppAssetMovieBox,    /**<  Metadata from a 3gpp file (movie box) */
-    M4COMMON_k3GppAssetTrackBox,    /**<  Metadata from a 3gpp file (track box) */
-
-    /* Streaming */
-    M4COMMON_kMetaDataSdpSession,    /**<  Metadata from an SDP file (Session level) */
-    M4COMMON_kMetaDataSdpAudio,        /**<  Metadata from an SDP file (media audio level) */
-    M4COMMON_kMetaDataSdpVideo,        /**<  Metadata from an SDP file (media video level) */
-
-    M4COMMON_kJpegExif                /**< EXIF in JPEG */
-} M4COMMON_MetadataType;
-
-/**
- ************************************************************************
- * enumeration    M4VPS_EncodingFormat
- * @brief        Text encoding format
- ************************************************************************
-*/
-typedef enum
-{
-    M4COMMON_kEncFormatUnknown    = 0,      /**< Unknown format                                 */
-    M4COMMON_kEncFormatASCII    = 1,        /**< ISO-8859-1. Terminated with $00                */
-    M4COMMON_kEncFormatUTF8        = 2,     /**< UTF-8 encoded Unicode . Terminated with $00    */
-    M4COMMON_kEncFormatUTF16    = 3         /**< UTF-16 encoded Unicode. Terminated with $00 00 */
-}  M4COMMON_EncodingFormat;
-
-/**
- ************************************************************************
- * structure    M4VPS_String
- * @brief        This structure defines string attribute
- ************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Void*            m_pString;                /**< Pointer to text        */
-    M4OSA_UInt32        m_uiSize;                /**< Text size in bytes        */
-    M4COMMON_EncodingFormat    m_EncodingFormat;    /**< Text encoding format    */
-
-} M4COMMON_String;
-
-/**
- ************************************************************************
- * structure    M4COMMON_Buffer
- * @brief        This structure defines generic buffer attribute
- ************************************************************************
-*/
-typedef struct
-{
-    M4OSA_MemAddr8         m_pBuffer;        /**< Pointer to buffer        */
-    M4OSA_UInt32        m_size;            /**< size of buffer in bytes    */
-} M4COMMON_Buffer;
-
-typedef enum
-{
-    M4COMMON_kMimeType_NONE,
-    M4COMMON_kMimeType_JPG,
-    M4COMMON_kMimeType_PNG,
-    M4COMMON_kMimeType_BMP,   /* bitmap, with header */
-    M4COMMON_kMimeType_RGB24, /* raw RGB 24 bits */
-    M4COMMON_kMimeType_RGB565, /* raw, RGB 16 bits */
-    M4COMMON_kMimeType_YUV420,
-    M4COMMON_kMimeType_MPEG4_IFrame /* RC: to support PV art */
-
-} M4COMMON_MimeType;
-
-/* picture type definition from id3v2 tag*/
-typedef enum
-{
-    M4COMMON_kPicType_Other                = 0x00,
-    M4COMMON_kPicType_32_32_Icon            = 0x01,
-    M4COMMON_kPicType_Other_Icon            = 0x02,
-    M4COMMON_kPicType_FrontCover            = 0x03,
-    M4COMMON_kPicType_BackCover            = 0x04,
-    M4COMMON_kPicType_LeafletPage            = 0x05,
-    M4COMMON_kPicType_Media                = 0x06,
-    M4COMMON_kPicType_LeadArtist            = 0x07,
-    M4COMMON_kPicType_Artist                = 0x08,
-    M4COMMON_kPicType_Conductor            = 0x09,
-    M4COMMON_kPicType_Orchestra            = 0x0A,
-    M4COMMON_kPicType_Composer            = 0x0B,
-    M4COMMON_kPicType_Lyricist            = 0x0C,
-    M4COMMON_kPicType_RecordingLocation    = 0x0D,
-    M4COMMON_kPicType_DuringRecording        = 0x0E,
-    M4COMMON_kPicType_DuringPerformance    = 0x0F,
-    M4COMMON_kPicType_MovieScreenCapture    = 0x10,
-    M4COMMON_kPicType_BrightColouredFish    = 0x11,
-    M4COMMON_kPicType_Illustration        = 0x12,
-    M4COMMON_kPicType_ArtistLogo            = 0x13,
-    M4COMMON_kPicType_StudioLogo            = 0x14
-} M4COMMON_PictureType;
-
-/**
- ******************************************************************************
- * enum        M4COMMON_Orientation
- * @brief        This enum defines the possible orientation of a frame as described
- *            in the EXIF standard.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4COMMON_kOrientationUnknown = 0,
-    M4COMMON_kOrientationTopLeft,
-    M4COMMON_kOrientationTopRight,
-    M4COMMON_kOrientationBottomRight,
-    M4COMMON_kOrientationBottomLeft,
-    M4COMMON_kOrientationLeftTop,
-    M4COMMON_kOrientationRightTop,
-    M4COMMON_kOrientationRightBottom,
-    M4COMMON_kOrientationLeftBottom
-}M4COMMON_Orientation ;
-
-/**
- ******************************************************************************
- * structure    M4EXIFC_Location
- * @brief        The Image GPS location (example : 48°52.21' )
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Float    degrees;
-    M4OSA_Float    minsec;
-} M4COMMON_Location;
-
-/**
- ************************************************************************
- * structure    M4COMMON_MetaDataAlbumArt
- * @brief        This structure defines fields of a album art
- ************************************************************************
-*/
-typedef struct
-{
-    M4COMMON_MimeType    m_mimeType;
-    M4OSA_UInt32        m_uiSize;
-    M4OSA_Void*            m_pData;
-
-    M4COMMON_String        m_pDescription;
-
-} M4COMMON_MetaDataAlbumArt;
-
-/**
- ************************************************************************
- * structure    M4COMMON_MetaDataFields
- * @brief        This structure defines fields of metadata information
- ************************************************************************
-*/
-typedef struct
-{
-    M4COMMON_MetadataType    m_MetadataType;
-
-    /* Meta data fields */
-    M4COMMON_String    m_pTitle;            /**< Title for the media  */
-    M4COMMON_String    m_pArtist;            /**< Performer or artist */
-    M4COMMON_String    m_pAlbum;            /**< Album title for the media */
-    M4COMMON_String    m_pAuthor;            /**< Author of the media */
-    M4COMMON_String    m_pGenre;            /**< Genre (category and style) of the media */
-    M4COMMON_String    m_pDescription;        /**< Caption or description for the media */
-    M4COMMON_String    m_pCopyRights;        /**< Notice about organization holding copyright
-                                                     for the media file */
-    M4COMMON_String    m_pRecordingYear;    /**< Recording year for the media */
-    M4COMMON_String    m_pRating;            /**< Media rating */
-
-    M4COMMON_String    m_pClassification;    /**< Classification of the media */
-    M4COMMON_String    m_pKeyWords;        /**< Media keywords */
-    M4COMMON_String    m_pLocation;        /**< Location information */
-    M4COMMON_String    m_pUrl;                /**< Reference of the resource */
-
-    M4OSA_UInt8        m_uiTrackNumber;    /**< Track number for the media*/
-    M4OSA_UInt32    m_uiDuration;        /**< The track duration in milliseconds */
-
-    M4COMMON_MetaDataAlbumArt    m_albumArt;    /**< AlbumArt description */
-    M4COMMON_String                m_pMood;    /**< Mood of the media */
-
-    /**< Modifs ACO 4/12/07 : add Exif specific infos */
-    M4COMMON_String    m_pCreationDateTime;    /**< date and time original image was generated */
-    M4COMMON_String    m_pLastChangeDateTime;    /**< file change date and time */
-    M4COMMON_String    m_pManufacturer;        /**< manufacturer of image input equipment */
-    M4COMMON_String    m_pModel;                /**< model of image input equipment */
-    M4COMMON_String    m_pSoftware;            /**< software used */
-    M4COMMON_Orientation m_Orientation;        /**< Orientation of the picture */
-
-    /**< Modifs FS 29/08/08 : additionnal Exif infos */
-    M4OSA_UInt32    m_width;            /**< image width in pixels */
-    M4OSA_UInt32    m_height;            /**< image height in pixels */
-    M4OSA_UInt32    m_thumbnailSize;    /**< size of the thumbnail */
-    M4COMMON_String    m_pLatitudeRef;        /**< Latitude reference */
-    M4COMMON_Location m_latitude;        /**< Latitude */
-    M4COMMON_String    m_pLongitudeRef;    /**< Longitude reference */
-    M4COMMON_Location m_longitude;        /**< Longitude  */
-
-} M4COMMON_MetaDataFields;
-
-
-#endif /*M4COMMON_TYPES_H*/
-
diff --git a/libvideoeditor/vss/common/inc/M4DA_Types.h b/libvideoeditor/vss/common/inc/M4DA_Types.h
deleted file mode 100755
index 58cab7e..0000000
--- a/libvideoeditor/vss/common/inc/M4DA_Types.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file    M4DA_Types.h
- * @brief    Data access type definition
- * @note    This file implements media specific types
- ************************************************************************
-*/
-
-#ifndef __M4DA_TYPES_H__
-#define __M4DA_TYPES_H__
-
-#include "NXPSW_CompilerSwitches.h"
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif /*__cplusplus*/
-
-/**
- ************************************************************************
- * enumeration    M4_StreamType
- * @brief        Type used to describe a stream (audio or video data flow).
- ************************************************************************
-*/
-typedef enum
-{
-    M4DA_StreamTypeUnknown                = -1,    /**< Unknow type */
-    M4DA_StreamTypeVideoMpeg4            = 0,    /**< MPEG-4 video */
-    M4DA_StreamTypeVideoH263            = 1,    /**< H263 video */
-    M4DA_StreamTypeAudioAmrNarrowBand    = 2,    /**< Amr narrow band audio */
-    M4DA_StreamTypeAudioAmrWideBand        = 3,    /**< Amr wide band audio */
-    M4DA_StreamTypeAudioAac                = 4,    /**< AAC audio */
-    M4DA_StreamTypeAudioMp3                = 5,    /**< MP3 audio */
-    M4DA_StreamTypeVideoMJpeg            = 6,    /**< MJPEG video */
-    M4DA_StreamTypeAudioPcm                = 7,    /**< Wav audio */
-    M4DA_StreamTypeAudioMidi            = 8,    /**< Midi audio */
-    M4DA_StreamTypeVideoMpeg4Avc        = 9,    /**< MPEG-4 AVC video (h264) */
-    M4DA_StreamTypeAudioAacADTS            = 10,    /**< AAC ADTS audio */
-    M4DA_StreamTypeAudioAacADIF            = 11,    /**< AAC ADIF audio */
-    M4DA_StreamTypeAudioWma                = 12,    /**< WMA audio */
-    M4DA_StreamTypeVideoWmv                = 13,    /**< WMV video */
-    M4DA_StreamTypeAudioReal            = 14,   /**< REAL audio */
-    M4DA_StreamTypeVideoReal            = 15,   /**< REAL video */
-    M4DA_StreamTypeAudioEvrc            = 16,   /**< Evrc audio */
-    M4DA_StreamTypeTimedText            = 20,    /**< Timed Text */
-    M4DA_StreamTypeAudioBba                = 21,    /**< Beat Brew audio fomat */
-    M4DA_StreamTypeAudioSmaf            = 22,    /**< SMAF audio */
-    M4DA_StreamTypeAudioImelody            = 23,    /**< IMELODY audio*/
-    M4DA_StreamTypeAudioXmf                = 24,    /**< XMF audio */
-    M4DA_StreamTypeAudioBpc                = 25,    /**< BPC audio */
-
-    /* ADPCM */
-    M4DA_StreamTypeAudioADPcm            = 26,    /**< ADPCM */
-
-    M4DA_StreamTypeVideoARGB8888        = 27
-} M4_StreamType;
-
-/**
- ************************************************************************
- * structure    M4_StreamHandler
- * @brief        Base structure to describe a stream.
- ************************************************************************
-*/
-typedef struct
-{
-    M4_StreamType    m_streamType;                /**< Stream type */
-    M4OSA_UInt32    m_streamId;                    /**< Stream Id (unique number definning
-                                                        the stream) */
-    M4OSA_Int32        m_duration;                    /**< Duration of the stream in milli
-                                                            seconds */
-    M4OSA_UInt32    m_averageBitRate;            /**< Average bitrate in kb/s */
-    M4OSA_UInt32    m_maxAUSize;                /**< Maximum size of an Access Unit */
-    M4OSA_UInt8*    m_pDecoderSpecificInfo;        /**< Pointer on specific information required
-                                                        to create a decoder */
-    M4OSA_UInt32    m_decoderSpecificInfoSize;    /**< Size of the specific information
-                                                         pointer above */
-    void*            m_pUserData;                /**< Pointer on User Data
-                                                    (initialized by the user) */
-    M4OSA_UInt32    m_structSize;                /**< Size of the structure in bytes */
-    M4OSA_Bool      m_bStreamIsOK;              /**< Flag to know if stream has no errors after
-                                                        parsing is finished */
-    M4OSA_UInt8*    m_pH264DecoderSpecificInfo;        /**< Pointer on specific information
-                                                            required to create a decoder */
-    M4OSA_UInt32    m_H264decoderSpecificInfoSize;    /**< Size of the specific
-                                                            information pointer above */
-    // MPEG4 & AAC decoders require ESDS info
-    M4OSA_UInt8*    m_pESDSInfo;                /**< Pointer on MPEG4 or AAC ESDS box */
-    M4OSA_UInt32    m_ESDSInfoSize;             /**< Size of the MPEG4 or AAC ESDS box */
-} M4_StreamHandler;
-
-/**
- ************************************************************************
- * structure    M4_VideoStreamHandler
- * @brief        Extended structure to describe a video stream.
- ************************************************************************
-*/
-typedef struct
-{
-    M4_StreamHandler    m_basicProperties;        /**< Audio-Video stream common parameters */
-    M4OSA_UInt32        m_videoWidth;            /**< Width of the video in the stream */
-    M4OSA_UInt32        m_videoHeight;            /**< Height of the video in the stream */
-    M4OSA_Float            m_averageFrameRate;        /**< Average frame rate of the video
-                                                            in the stream */
-    M4OSA_Int32         videoRotationDegrees;        /**< Video rotation degree */
-    M4OSA_UInt32        m_structSize;            /**< Size of the structure in bytes */
-} M4_VideoStreamHandler;
-
-/**
- ************************************************************************
- * structure    M4_AudioStreamHandler
- * @brief        Extended structure to describe an audio stream.
- ************************************************************************
-*/
-typedef struct
-{
-    M4_StreamHandler    m_basicProperties;        /**< Audio-Video stream common parameters */
-    M4OSA_UInt32        m_nbChannels;            /**< Number of channels in the audio stream
-                                                        (1-mono, 2-stereo) */
-    M4OSA_UInt32        m_byteFrameLength;        /**< Size of frame samples in bytes */
-    M4OSA_UInt32        m_byteSampleSize;        /**< Number of bytes per sample */
-    M4OSA_UInt32        m_samplingFrequency;    /**< Sample frequency in kHz */
-    M4OSA_UInt32        m_structSize;            /**< Size of the structure in bytes */
-} M4_AudioStreamHandler;
-
-#ifdef M4VPS_SUPPORT_TTEXT
-
-/**
- ************************************************************************
- * structure    M4_TextStreamHandler
- * @brief        Extended structure to describe a text stream.
- ************************************************************************
-*/
-typedef struct
-{
-    M4_StreamHandler    m_basicProperties;    /**< Audio-Video stream common parameters */
-    M4OSA_UInt32        m_trackWidth;        /**< Width of the video in the stream */
-    M4OSA_UInt32        m_trackHeight;        /**< Height of the video in the stream */
-    M4OSA_UInt32        m_trackXpos;        /**< X position of the text track in video area */
-    M4OSA_UInt32        m_trackYpos;        /**< Y position of the text track in video area */
-    M4OSA_UInt8            back_col_rgba[4];    /**< the background color in RGBA */
-    M4OSA_UInt16        uiLenght;            /**< the string lenght in bytes */
-    M4OSA_UInt32        disp_flag;            /**< the way text will be displayed */
-    M4OSA_UInt8            horiz_justif;        /**< the horizontal justification of the text */
-    M4OSA_UInt8            verti_justif;        /**< the vertical justification of the text */
-    /* style */
-    M4OSA_UInt16        styl_start_char;    /**< the first character impacted by style */
-    M4OSA_UInt16        styl_end_char;        /**< the last character impacted by style */
-    M4OSA_UInt16        fontID;                /**< ID of the font */
-    M4OSA_UInt8            face_style;            /**< the text face-style: bold, italic,
-                                                         underlined, plain(default) */
-    M4OSA_UInt8            font_size;            /**< size in pixel of font */
-    M4OSA_UInt8            text_col_rgba[4];    /**< the text color in RGBA */
-    /* box */
-    M4OSA_UInt16        box_top;         /**< the top position of text box in the track area */
-    M4OSA_UInt16        box_left;        /**< the left position of text box in the track area */
-    M4OSA_UInt16        box_bottom;      /**< the bottom position of text box in the track area */
-    M4OSA_UInt16        box_right;       /**< the right position of text box in the track area */
-    M4OSA_UInt32        m_structSize;    /**< Size of the structure in bytes */
-} M4_TextStreamHandler;
-
-#endif /*M4VPS_SUPPORT_TTEXT*/
-
-/**
- ************************************************************************
- * structure    M4_AccessUnit
- * @brief        Structure to describe an access unit.
- ************************************************************************
-*/
-typedef struct
-{
-  M4OSA_UInt32            m_streamID;       /**< Id of the stream to get an AU from */
-  M4OSA_MemAddr8        m_dataAddress;      /**< Pointer to a memory area with the encoded data */
-  M4OSA_UInt32            m_size;           /**< Size of the dataAdress area */
-  M4OSA_Double            m_CTS;            /**< Composition Time Stamp for the Access Unit */
-  M4OSA_Double            m_DTS ;           /**< Decoded Time Stamp for the Access Unit */
-  M4OSA_UInt8            m_attribute;       /**< RAP information & AU corrupted */
-  M4OSA_UInt32            m_maxsize;        /**< Maximum size of the AU */
-  M4OSA_UInt32            m_structSize;     /**< Structure size */
-} M4_AccessUnit;
-
-#ifdef __cplusplus
-}
-#endif /*__cplusplus*/
-
-#endif /* __M4DA_TYPES_H__ */
-
diff --git a/libvideoeditor/vss/common/inc/M4DECODER_Common.h b/libvideoeditor/vss/common/inc/M4DECODER_Common.h
deleted file mode 100755
index 93e3062..0000000
--- a/libvideoeditor/vss/common/inc/M4DECODER_Common.h
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4DECODER_Common.h
- * @brief  Shell Decoder common interface declaration
- * @note   This file declares the common interfaces that decoder shells must implement
- *
- ************************************************************************
-*/
-#ifndef __M4DECODER_COMMON_H__
-#define __M4DECODER_COMMON_H__
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_OptionID.h"
-#include "M4OSA_CoreID.h"
-
-#include "M4READER_Common.h"
-#include "M4VIFI_FiltersAPI.h"
-
-#include "M4_Utils.h"
-
-/* ----- Errors and Warnings ----- */
-
-/**
- * Warning: there is no new decoded frame to render since the last rendering
- */
-#define M4WAR_VIDEORENDERER_NO_NEW_FRAME M4OSA_ERR_CREATE(M4_WAR, M4DECODER_COMMON, 0x0001)
-/**
- * Warning: the deblocking filter is not implemented
- */
-#define M4WAR_DEBLOCKING_FILTER_NOT_IMPLEMENTED M4OSA_ERR_CREATE(M4_WAR, M4DECODER_COMMON,\
-                                                                     0x000002)
-
-
-/* Error: Stream H263 profiles (other than  0) are not supported */
-#define M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED            M4OSA_ERR_CREATE(M4_ERR,\
-                                                                 M4DECODER_MPEG4, 0x0001)
-/* Error: Stream H263 not baseline not supported (Supported sizes are CIF, QCIF or SQCIF) */
-#define M4ERR_DECODER_H263_NOT_BASELINE                        M4OSA_ERR_CREATE(M4_ERR,\
-                                                                 M4DECODER_MPEG4, 0x0002)
-
-/**
- ************************************************************************
- * enum     M4DECODER_AVCProfileLevel
- * @brief    This enum defines the AVC decoder profile and level for the current instance
- * @note    This options can be read from decoder via M4DECODER_getOption_fct
- ************************************************************************
-*/
-typedef enum
-{
-    M4DECODER_AVC_kProfile_0_Level_1 = 0,
-    M4DECODER_AVC_kProfile_0_Level_1b,
-    M4DECODER_AVC_kProfile_0_Level_1_1,
-    M4DECODER_AVC_kProfile_0_Level_1_2,
-    M4DECODER_AVC_kProfile_0_Level_1_3,
-    M4DECODER_AVC_kProfile_0_Level_2,
-    M4DECODER_AVC_kProfile_0_Level_2_1,
-    M4DECODER_AVC_kProfile_0_Level_2_2,
-    M4DECODER_AVC_kProfile_0_Level_3,
-    M4DECODER_AVC_kProfile_0_Level_3_1,
-    M4DECODER_AVC_kProfile_0_Level_3_2,
-    M4DECODER_AVC_kProfile_0_Level_4,
-    M4DECODER_AVC_kProfile_0_Level_4_1,
-    M4DECODER_AVC_kProfile_0_Level_4_2,
-    M4DECODER_AVC_kProfile_0_Level_5,
-    M4DECODER_AVC_kProfile_0_Level_5_1,
-    M4DECODER_AVC_kProfile_and_Level_Out_Of_Range = 255
-} M4DECODER_AVCProfileLevel;
-
-/**
- ************************************************************************
- * enum     M4DECODER_OptionID
- * @brief    This enum defines the decoder options
- * @note    These options can be read from or written to a decoder via M4DECODER_getOption_fct
- ************************************************************************
-*/
-typedef enum
-{
-    /**
-    Get the version of the core decoder
-    */
-    M4DECODER_kOptionID_Version        = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x01),
-    /**
-    Get the size of the currently decoded video
-    */
-    M4DECODER_kOptionID_VideoSize    = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x02),
-    /**
-    Set the conversion filter to use at rendering
-    */
-    M4DECODER_kOptionID_OutputFilter = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x03),
-    /**
-    Activate the Deblocking filter
-    */
-    M4DECODER_kOptionID_DeblockingFilter = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x04),
-    /**
-    Get nex rendered frame CTS
-    */
-    M4DECODER_kOptionID_NextRenderedFrameCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON,\
-                                                                         0x05),
-
-    /**
-    Set the YUV data to the dummy video decoder
-    */
-    M4DECODER_kOptionID_DecYuvData =
-        M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x06),
-    /**
-    Set the YUV data with color effect applied to the dummy video decoder
-    */
-    M4DECODER_kOptionID_YuvWithEffectNonContiguous =
-        M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x07),
-
-    M4DECODER_kOptionID_YuvWithEffectContiguous =
-        M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x08),
-
-    M4DECODER_kOptionID_EnableYuvWithEffect =
-        M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x09),
-
-    /**
-     * Get the supported video decoders and capabilities */
-    M4DECODER_kOptionID_VideoDecodersAndCapabilities =
-        M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x10),
-
-    /* common to MPEG4 decoders */
-    /**
-     * Get the DecoderConfigInfo */
-    M4DECODER_MPEG4_kOptionID_DecoderConfigInfo = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                         M4DECODER_MPEG4, 0x01),
-
-    /* last decoded cts */
-    M4DECODER_kOptionID_AVCLastDecodedFrameCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AVC,\
-                                                                             0x01)
-/* Last decoded cts */
-
-} M4DECODER_OptionID;
-
-
-/**
- ************************************************************************
- * struct    M4DECODER_MPEG4_DecoderConfigInfo
- * @brief    Contains info read from the MPEG-4 VideoObjectLayer.
- ************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt8        uiProfile;                /**< profile and level as defined in the Visual
-                                                         Object Sequence header, if present */
-    M4OSA_UInt32    uiTimeScale;            /**< time scale as parsed in VOL header */
-    M4OSA_UInt8        uiUseOfResynchMarker;    /**< Usage of resynchronization marker */
-    M4OSA_Bool        bDataPartition;            /**< If 1 data partitioning is used. */
-    M4OSA_Bool        bUseOfRVLC;                /**< Usage of RVLC for the stream */
-
-} M4DECODER_MPEG4_DecoderConfigInfo;
-
-
-/**
- ***********************************************************************
- * structure    M4DECODER_VideoSize
- * @brief        This structure defines the video size (width and height)
- * @note        This structure is used to retrieve via the M4DECODER_getOption_fct
- *                function the size of the current decoded video
- ************************************************************************
-*/
-typedef struct _M4DECODER_VideoSize
-{
-    M4OSA_UInt32   m_uiWidth;    /**< video width  in pixels */
-    M4OSA_UInt32   m_uiHeight;    /**< video height in pixels */
-
-} M4DECODER_VideoSize;
-
-/**
- ************************************************************************
- * structure    M4DECODER_OutputFilter
- * @brief        This structure defines the conversion filter
- * @note        This structure is used to retrieve the filter function
- *                pointer and its user data via the function
- *                M4DECODER_getOption_fct    with the option
- *                M4DECODER_kOptionID_OutputFilter
- ************************************************************************
-*/
-typedef struct _M4DECODER_OutputFilter
-{
-    M4OSA_Void   *m_pFilterFunction;    /**< pointer to the filter function */
-    M4OSA_Void   *m_pFilterUserData;    /**< user data of the filter        */
-
-} M4DECODER_OutputFilter;
-
-/**
- ************************************************************************
- * enum     M4DECODER_VideoType
- * @brief    This enum defines the video types used to create decoders
- * @note    This enum is used internally by the VPS to identify a currently supported
- *            video decoder interface. Each decoder is registered with one of this type associated.
- *            When a decoder instance is needed, this type is used to identify and
- *            and retrieve its interface.
- ************************************************************************
-*/
-typedef enum
-{
-    M4DECODER_kVideoTypeMPEG4 = 0,
-    M4DECODER_kVideoTypeMJPEG,
-    M4DECODER_kVideoTypeAVC,
-    M4DECODER_kVideoTypeWMV,
-    M4DECODER_kVideoTypeREAL,
-    M4DECODER_kVideoTypeYUV420P,
-
-    M4DECODER_kVideoType_NB  /* number of decoders, keep it as last enum entry */
-
-} M4DECODER_VideoType ;
-
-typedef struct {
-    M4OSA_UInt32 mProfile;
-    M4OSA_UInt32 mLevel;
-} VideoProfileLevel;
-
-typedef struct {
-    VideoProfileLevel *profileLevel;
-    M4OSA_UInt32 profileNumber;
-} VideoComponentCapabilities;
-
-typedef struct {
-    M4_StreamType codec;
-    VideoComponentCapabilities *component;
-    M4OSA_UInt32 componentNumber;
-} VideoDecoder;
-
-typedef struct {
-    VideoDecoder *decoder;
-    M4OSA_UInt32 decoderNumber;
-} M4DECODER_VideoDecoders;
-/**
- ************************************************************************
- * @brief    creates an instance of the decoder
- * @note    allocates the context
- *
- * @param    pContext:        (OUT)    Context of the decoder
- * @param    pStreamHandler:    (IN)    Pointer to a video stream description
- * @param    pGlobalInterface:  (IN)    Pointer to the M4READER_GlobalInterface structure that must
- *                                       be used by the decoder to read data from the stream
- * @param    pDataInterface:    (IN)    Pointer to the M4READER_DataInterface structure that must
- *                                       be used by the decoder to read data from the stream
- * @param    pAccessUnit        (IN)    Pointer to an access unit (allocated by the caller)
- *                                      where the decoded data are stored
- *
- * @return    M4NO_ERROR                 there is no error
- * @return  M4ERR_STATE             State automaton is not applied
- * @return    M4ERR_ALLOC                a memory allocation has failed
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4DECODER_create_fct)    (M4OSA_Context *pContext,
-                                                 M4_StreamHandler *pStreamHandler,
-                                                 M4READER_GlobalInterface *pGlobalInterface,
-                                                 M4READER_DataInterface *pDataInterface,
-                                                 M4_AccessUnit *pAccessUnit,
-                                                 M4OSA_Void* pUserData);
-
-/**
- ************************************************************************
- * @brief    destroy the instance of the decoder
- * @note    after this call the context is invalid
- *
- * @param    context:    (IN)    Context of the decoder
- *
- * @return    M4NO_ERROR             There is no error
- * @return  M4ERR_PARAMETER     The context is invalid (in DEBUG only)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4DECODER_destroy_fct)    (M4OSA_Context context);
-
-/**
- ************************************************************************
- * @brief    get an option value from the decoder
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to retrieve a property value:
- *          -the version number of the decoder
- *          -the size (widthxheight) of the image
- *
- * @param    context:    (IN)        Context of the decoder
- * @param    optionId:    (IN)        indicates the option to set
- * @param    pValue:        (IN/OUT)    pointer to structure or value (allocated by user) where
- *                                      option is stored
- * @return    M4NO_ERROR                 there is no error
- * @return  M4ERR_PARAMETER         The context is invalid (in DEBUG only)
- * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
- * @return  M4ERR_STATE             State automaton is not applied
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4DECODER_getOption_fct)(M4OSA_Context context, M4OSA_OptionID optionId,
-                                             M4OSA_DataOption pValue);
-
-/**
- ************************************************************************
- * @brief   set an option value of the decoder
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to set a property value:
- *          -the conversion filter to use at rendering
- *
- * @param   context:    (IN)        Context of the decoder
- * @param   optionId:   (IN)        Identifier indicating the option to set
- * @param   pValue:     (IN)        Pointer to structure or value (allocated by user)
- *                                     where option is stored
- * @return  M4NO_ERROR              There is no error
- * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
- * @return  M4ERR_STATE             State automaton is not applied
- * @return  M4ERR_PARAMETER         The option parameter is invalid
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4DECODER_setOption_fct)(M4OSA_Context context, M4OSA_OptionID optionId,
-                                                 M4OSA_DataOption pValue);
-
-/**
- ************************************************************************
- * @brief   Decode Access Units up to a target time
- * @note    Parse and decode the stream until it is possible to output a decoded image for which
- *            the composition time is equal or greater to the passed targeted time
- *          The data are read from the reader data interface
- *
- * @param    context:    (IN)        Context of the decoder
- * @param    pTime:        (IN/OUT)    IN: Time to decode up to (in milli secondes)
- *                                    OUT:Time of the last decoded frame (in ms)
- * @param   bJump:      (IN)        0 if no jump occured just before this call
- *                                  1 if a a jump has just been made
- * @param   tolerance:      (IN)        We may decode an earlier frame within the tolerance.
- *                                      The time difference is specified in milliseconds.
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4WAR_NO_MORE_AU        there is no more access unit to decode (end of stream)
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4DECODER_decode_fct)    (M4OSA_Context context, M4_MediaTime* pTime,
-                                                 M4OSA_Bool bJump, M4OSA_UInt32 tolerance);
-
-/**
- ************************************************************************
- * @brief    Renders the video at the specified time.
- * @note
- * @param    context:     (IN)        Context of the decoder
- * @param   pTime:       (IN/OUT)   IN: Time to render to (in milli secondes)
- *                OUT:Time of the actually rendered frame (in ms)
- * @param    pOutputPlane:(OUT)        Output plane filled with decoded data (converted)
- * @param   bForceRender:(IN)       1 if the image must be rendered even it has already been
- *                                  0 if not (in which case the function can return
- *                                       M4WAR_VIDEORENDERER_NO_NEW_FRAME)
- * @return    M4NO_ERROR                 There is no error
- * @return    M4ERR_PARAMETER            At least one parameter is not properly set
- * @return  M4ERR_STATE             State automaton is not applied
- * @return  M4ERR_ALLOC             There is no more available memory
- * @return    M4WAR_VIDEORENDERER_NO_NEW_FRAME    If the frame to render has already been rendered
- ************************************************************************
-*/
-typedef M4OSA_ERR  (M4DECODER_render_fct)    (M4OSA_Context context, M4_MediaTime* pTime,
-                                              M4VIFI_ImagePlane* pOutputPlane,
-                                              M4OSA_Bool bForceRender);
-
-/**
- ************************************************************************
- * structure    M4DECODER_VideoInterface
- * @brief        This structure defines the generic video decoder interface
- * @note        This structure stores the pointers to functions of one video decoder type.
- *                The decoder type is one of the M4DECODER_VideoType
- ************************************************************************
-*/
-typedef struct _M4DECODER_VideoInterface
-{
-    M4DECODER_create_fct*        m_pFctCreate;
-    M4DECODER_destroy_fct*        m_pFctDestroy;
-    M4DECODER_getOption_fct*    m_pFctGetOption;
-    M4DECODER_setOption_fct*    m_pFctSetOption;
-    M4DECODER_decode_fct*        m_pFctDecode;
-    M4DECODER_render_fct*        m_pFctRender;
-} M4DECODER_VideoInterface;
-
-#endif /*__M4DECODER_COMMON_H__*/
diff --git a/libvideoeditor/vss/common/inc/M4DECODER_Null.h b/libvideoeditor/vss/common/inc/M4DECODER_Null.h
deleted file mode 100644
index 047d857..0000000
--- a/libvideoeditor/vss/common/inc/M4DECODER_Null.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file    M4VD_Null.h
- * @brief   Implementation of the a "null" video decoder,i.e. a decoder
- *          that does not do actual decoding.
- * @note    This file defines the getInterface function.
-*************************************************************************
-*/
-#ifndef __M4DECODER_NULL_H__
-#define __M4DECODER_NULL_H__
-
-#include "M4DECODER_Common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- ************************************************************************
- * @brief Retrieves the interface implemented by the decoder
- * @param pDecoderType        : Pointer to a M4DECODER_VideoType
- *                             (allocated by the caller)
- *                             that will be filled with the decoder type
- * @param pDecoderInterface   : Address of a pointer that will be set to
- *                              the interface implemented by this decoder.
- *                              The interface is a structure allocated by
- *                              this function and must be freed by the caller.
- *
- * @returns : M4NO_ERROR  if OK
- *            M4ERR_ALLOC if allocation failed
- ************************************************************************
-*/
-M4OSA_ERR M4DECODER_NULL_getInterface( M4DECODER_VideoType *pDecoderType,
-                                 M4DECODER_VideoInterface **pDecoderInterface);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4DECODER_NULL_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h b/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h
deleted file mode 100755
index cba02a0..0000000
--- a/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4ENCODER_AudioCommon.h
- * @brief    VES audio encoders shell interface.
- * @note    This file defines the types internally used by the VES to abstract audio encoders
- ******************************************************************************
-*/
-#ifndef __M4ENCODER_AUDIOCOMMON_H__
-#define __M4ENCODER_AUDIOCOMMON_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "M4OSA_OptionID.h"     /* for M4OSA_OPTION_ID_CREATE() */
-#include "M4OSA_CoreID.h"
-
-#define M4ENCODER_AUDIO_NB_CHANNELS_MAX 2
-/* WARNING: this value must be equal to the number of samples grabbed */
-//#define M4ENCODER_AUDIO_PCM_SAMPLE_NUMBER 960    /* imposed by the AAC encoder. */
-#define M4ENCODER_AUDIO_PCM_SAMPLE_NUMBER 1024    /* imposed by the AAC encoder. */
-
-
-/**
- ******************************************************************************
- * enumeration    M4ENCODER_Audio_OptionID
- * @brief        This enum defines the core AAC shell encoder options
- ******************************************************************************
-*/
-typedef enum
-{
- /* Maximum generated AU size */
-    M4ENCODER_Audio_maxAUsize     = M4OSA_OPTION_ID_CREATE(M4_READ,M4ENCODER_AUDIO, 0x01)
-
-} M4ENCODER_Audio_OptionID;
-
-
- /**
- ******************************************************************************
- * enum        M4ENCODER_SamplingFrequency
- * @brief    Thie enum defines the audio sampling frequency.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_k8000Hz = 8000,
-    M4ENCODER_k11025Hz = 11025,
-    M4ENCODER_k12000Hz = 12000,
-    M4ENCODER_k16000Hz = 16000,
-    M4ENCODER_k22050Hz = 22050,
-    M4ENCODER_k24000Hz = 24000,
-    M4ENCODER_k32000Hz = 32000,
-    M4ENCODER_k44100Hz = 44100,
-    M4ENCODER_k48000Hz = 48000
-} M4ENCODER_SamplingFrequency;
-
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_AudioFormat
- * @brief    This enum defines the audio compression formats.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kAMRNB = 0,
-    M4ENCODER_kAAC,
-    M4ENCODER_kAudioNULL,    /**< No compression */
-    M4ENCODER_kMP3,
-    M4ENCODER_kAudio_NB        /* number of encoders, keep it as last enum entry */
-
-} M4ENCODER_AudioFormat;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_ChannelNumber
- * @brief    Thie enum defines the number of audio channels.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kMono  = 0,
-    M4ENCODER_kStereo,
-    M4ENCODER_kStereoNoInterleave
-} M4ENCODER_ChannelNumber;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_AudioBitrate
- * @brief    Thie enum defines the avalaible bitrates.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kAudio_4_75_KBPS    = 4750,
-    M4ENCODER_kAudio_5_15_KBPS    = 5150,
-    M4ENCODER_kAudio_5_9_KBPS    = 5900,
-    M4ENCODER_kAudio_6_7_KBPS    = 6700,
-    M4ENCODER_kAudio_7_4_KBPS    = 7400,
-    M4ENCODER_kAudio_7_95_KBPS    = 7950,
-    M4ENCODER_kAudio_8_KBPS        = 8000,
-    M4ENCODER_kAudio_10_2_KBPS    = 10200,
-    M4ENCODER_kAudio_12_2_KBPS    = 12200,
-    M4ENCODER_kAudio_16_KBPS    = 16000,
-    M4ENCODER_kAudio_24_KBPS    = 24000,
-    M4ENCODER_kAudio_32_KBPS    = 32000,
-    M4ENCODER_kAudio_40_KBPS    = 40000,
-    M4ENCODER_kAudio_48_KBPS    = 48000,
-    M4ENCODER_kAudio_56_KBPS    = 56000,
-    M4ENCODER_kAudio_64_KBPS    = 64000,
-    M4ENCODER_kAudio_80_KBPS    = 80000,
-    M4ENCODER_kAudio_96_KBPS    = 96000,
-    M4ENCODER_kAudio_112_KBPS    = 112000,
-    M4ENCODER_kAudio_128_KBPS    = 128000,
-    M4ENCODER_kAudio_144_KBPS    = 144000,
-    M4ENCODER_kAudio_160_KBPS    = 160000,
-    M4ENCODER_kAudio_192_KBPS    = 192000,
-    M4ENCODER_kAudio_224_KBPS    = 224000,
-    M4ENCODER_kAudio_256_KBPS    = 256000,
-    M4ENCODER_kAudio_320_KBPS    = 320000
-} M4ENCODER_AudioBitrate;
-
-
-/**
- ******************************************************************************
- * enum            M4ENCODER_AacRegulation
- * @brief        The current mode of the bitrate regulation.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kAacRegulNone = 0,    /**< no bitrate regulation */
-    M4ENCODER_kAacBitReservoir        /**< better quality, but more CPU consumed */
-} M4ENCODER_AacRegulation;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_AmrSID
- * @brief    This enum defines the SID of the AMR encoder.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kAmrNoSID = 0     /**< no SID */
-} M4ENCODER_AmrSID;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_AacParams
- * @brief    This structure defines all the settings specific to the AAC encoder.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4ENCODER_AacRegulation    Regulation;
-    M4OSA_Bool                bHighSpeed;
-    M4OSA_Bool                bTNS;
-    M4OSA_Bool                bPNS;
-    M4OSA_Bool                bIS;
-    M4OSA_Bool                bMS;
-} M4ENCODER_AacParams;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_AudioParams
- * @brief    This structure defines all the settings avalaible when encoding audio.
- ******************************************************************************
-*/
-typedef struct s_M4ENCODER_AudioParams
-{
-    M4ENCODER_SamplingFrequency    Frequency;    /**< the sampling frequency */
-    M4ENCODER_ChannelNumber        ChannelNum;    /**< the numbe of channels (mono, stereo, ..) */
-    M4ENCODER_AudioBitrate        Bitrate;    /**<  bitrate, see enum  */
-    M4ENCODER_AudioFormat        Format;        /**<  audio compression format, AMR, AAC ...  */
-    union {
-        M4ENCODER_AacParams        AacParam;
-        M4ENCODER_AmrSID        AmrSID;
-    } SpecifParam;                            /**< the audio encoder specific parameters */
-} M4ENCODER_AudioParams;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_AudioDecSpecificInfo
- * @brief    This structure describes the decoder specific info buffer.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_MemAddr8    pInfo;        /**< the buffer adress */
-    M4OSA_UInt32    infoSize;    /**< the buffer size in bytes */
-} M4ENCODER_AudioDecSpecificInfo;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_AudioBuffer
- * @brief    This structure defines the data buffer.
- ******************************************************************************
-*/
-typedef struct
-{
-    /**< the table of buffers (unused buffers are set to NULL) */
-    M4OSA_MemAddr8    pTableBuffer[M4ENCODER_AUDIO_NB_CHANNELS_MAX];
-    /**< the table of the size of corresponding buffer at same index */
-    M4OSA_UInt32    pTableBufferSize[M4ENCODER_AUDIO_NB_CHANNELS_MAX];
-} M4ENCODER_AudioBuffer;
-
-typedef M4OSA_ERR (M4AE_init)        (M4OSA_Context* hContext, M4OSA_Void* pUserData);
-typedef M4OSA_ERR (M4AE_cleanUp)    (M4OSA_Context pContext);
-typedef M4OSA_ERR (M4AE_open)        (M4OSA_Context pContext, M4ENCODER_AudioParams *params,
-                                        M4ENCODER_AudioDecSpecificInfo *decSpecInfo,
-                                        M4OSA_Context grabberContext);
-typedef M4OSA_ERR (M4AE_close)        (M4OSA_Context pContext);
-typedef M4OSA_ERR (M4AE_step)         (M4OSA_Context pContext, M4ENCODER_AudioBuffer *inBuffer,
-                                        M4ENCODER_AudioBuffer *outBuffer);
-typedef M4OSA_ERR (M4AE_getOption)    (M4OSA_Context pContext, M4OSA_OptionID    option,
-                                        M4OSA_DataOption *valuePtr);
-/**
- ******************************************************************************
- * struct    M4ENCODER_AudioGlobalInterface
- * @brief    Defines all the functions required for an audio encoder shell.
- ******************************************************************************
-*/
-typedef struct _M4ENCODER_AudioGlobalInterface
-{
-    M4AE_init*        pFctInit;
-    M4AE_cleanUp*    pFctCleanUp;
-    M4AE_open*        pFctOpen;
-    M4AE_close*        pFctClose;
-    M4AE_step*        pFctStep;
-    M4AE_getOption*    pFctGetOption;
-} M4ENCODER_AudioGlobalInterface;
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4ENCODER_AUDIOCOMMON_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4ENCODER_common.h b/libvideoeditor/vss/common/inc/M4ENCODER_common.h
deleted file mode 100755
index 9064602..0000000
--- a/libvideoeditor/vss/common/inc/M4ENCODER_common.h
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4ENCODER_common.h
- * @note    This file defines the types internally used by the VES to abstract encoders
-
- ******************************************************************************
-*/
-#ifndef __M4ENCODER_COMMON_H__
-#define __M4ENCODER_COMMON_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/**
- * Video preprocessing common interface */
-#include "M4VPP_API.h"
-
-/**
- * Writer common interface */
-#include "M4WRITER_common.h"
-
-/* IMAGE STAB */
-/* percentage of image suppressed (computed from the standard dimension).*/
-#define M4ENCODER_STAB_FILTER_CROP_PERCENTAGE 10
-        /* WARNING: take the inferior even dimension, ex: 10% for QCIF output => 192x158 */
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_OpenMode
- * @brief    Definition of open mode for the encoder.
- * @note    DEFAULT  : pointer to M4ENCODER_open() which use default parameters
- *          ADVANCED : pointer to M4ENCODER_open_advanced() which allow to customize
- *                     various encoding parameters
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_OPEN_DEFAULT,
-    M4ENCODER_OPEN_ADVANCED
-} M4ENCODER_OpenMode;
-
- /**
- ******************************************************************************
- * enum        M4ENCODER_FrameRate
- * @brief    Thie enum defines the encoded video framerates.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_k5_FPS,
-    M4ENCODER_k7_5_FPS,
-    M4ENCODER_k10_FPS,
-    M4ENCODER_k12_5_FPS,
-    M4ENCODER_k15_FPS,
-    M4ENCODER_k20_FPS,
-    M4ENCODER_k25_FPS,
-    M4ENCODER_k30_FPS,
-    M4ENCODER_kVARIABLE_FPS,            /**< Variable video bitrate */
-    M4ENCODER_kUSE_TIMESCALE            /**< Advanced encoding, use timescale indication rather
-                                                than framerate */
-} M4ENCODER_FrameRate;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_InputFormat
- * @brief    Thie enum defines the video format of the grabbing.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kIYUV420=0, /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
-    M4ENCODER_kIYUV422,   /**< YUV422 planar */
-    M4ENCODER_kIYUYV,     /**< YUV422 interlaced, luma first */
-    M4ENCODER_kIUYVY,     /**< YUV422 interlaced, chroma first */
-    M4ENCODER_kIJPEG,     /**< JPEG compressed frames */
-    M4ENCODER_kIRGB444,   /**< RGB 12 bits 4:4:4 */
-    M4ENCODER_kIRGB555,   /**< RGB 15 bits 5:5:5 */
-    M4ENCODER_kIRGB565,   /**< RGB 16 bits 5:6:5 */
-    M4ENCODER_kIRGB24,    /**< RGB 24 bits 8:8:8 */
-    M4ENCODER_kIRGB32,    /**< RGB 32 bits  */
-    M4ENCODER_kIBGR444,   /**< BGR 12 bits 4:4:4 */
-    M4ENCODER_kIBGR555,   /**< BGR 15 bits 5:5:5 */
-    M4ENCODER_kIBGR565,   /**< BGR 16 bits 5:6:5 */
-    M4ENCODER_kIBGR24,    /**< BGR 24 bits 8:8:8 */
-    M4ENCODER_kIBGR32     /**< BGR 32 bits  */
-} M4ENCODER_InputFormat;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_Format
- * @brief    Thie enum defines the video compression formats.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kMPEG4 = 0,
-    M4ENCODER_kH263,
-    M4ENCODER_kH264,
-    M4ENCODER_kJPEG,
-    M4ENCODER_kMJPEG,
-    M4ENCODER_kNULL,
-    M4ENCODER_kYUV420,            /**< No compression */
-    M4ENCODER_kYUV422,            /**< No compression */
-
-    M4ENCODER_kVideo_NB /* number of decoders, keep it as last enum entry */
-} M4ENCODER_Format;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_FrameWidth
- * @brief    Thie enum defines the avalaible frame Width.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_SQCIF_Width = 128, /**< SQCIF 128x96 */
-    M4ENCODER_QQVGA_Width = 160, /**< QQVGA 160x120 */
-    M4ENCODER_QCIF_Width  = 176, /**< QCIF 176x144 */
-    M4ENCODER_QVGA_Width  = 320, /**< QVGA 320x240 */
-    M4ENCODER_CIF_Width   = 352, /**< CIF 352x288 */
-    M4ENCODER_VGA_Width   = 640, /**< VGA 640x480 */
-    M4ENCODER_SVGA_Width  = 800, /**< SVGA 800x600 */
-    M4ENCODER_XGA_Width   = 1024, /**< XGA 1024x768 */
-    M4ENCODER_XVGA_Width  = 1280, /**< XVGA 1280x1024 */
-/* +PR LV5807 */
-    M4ENCODER_WVGA_Width  = 800, /**< WVGA 800 x 480 */
-    M4ENCODER_NTSC_Width  = 720, /**< NTSC 720 x 480 */
-/* -PR LV5807 */
-
-/* +CR Google */
-    M4ENCODER_640_360_Width   = 640,  /**< 640x360 */
-    // StageFright encoders require %16 resolution
-    M4ENCODER_854_480_Width   = 848,  /**< 848x480 */
-    M4ENCODER_1280_720_Width  = 1280, /**< 720p 1280x720 */
-    // StageFright encoders require %16 resolution
-    M4ENCODER_1080_720_Width  = 1088, /**< 720p 1088x720 */
-    M4ENCODER_960_720_Width   = 960,  /**< 720p 960x720 */
-    M4ENCODER_1920_1080_Width = 1920  /**< 1080p 1920x1080 */
-/* -CR Google */
-
-} M4ENCODER_FrameWidth;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_FrameHeight
- * @brief    Thie enum defines the avalaible frame Height.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_SQCIF_Height = 96,  /**< SQCIF 128x96 */
-    M4ENCODER_QQVGA_Height = 120, /**< QQVGA 160x120 */
-    M4ENCODER_QCIF_Height  = 144, /**< QCIF 176x144 */
-    M4ENCODER_QVGA_Height  = 240, /**< QVGA 320x240 */
-    M4ENCODER_CIF_Height   = 288, /**< CIF 352x288 */
-    M4ENCODER_VGA_Height   = 480, /**< VGA 340x480 */
-    M4ENCODER_SVGA_Height  = 600, /**< SVGA 800x600 */
-    M4ENCODER_XGA_Height   = 768, /**< XGA 1024x768 */
-    M4ENCODER_XVGA_Height  = 1024, /**< XVGA 1280x1024 */
-/* +PR LV5807 */
-    M4ENCODER_WVGA_Height  = 480, /**< WVGA 800 x 480 */
-    M4ENCODER_NTSC_Height  = 480, /**< NTSC 720 x 480 */
-/* -PR LV5807 */
-
-/* +CR Google */
-    M4ENCODER_640_360_Height  = 360, /**< 640x360 */
-    M4ENCODER_854_480_Height  = 480, /**< 854x480 */
-    M4ENCODER_1280_720_Height = 720, /**< 720p 1280x720 */
-    M4ENCODER_1080_720_Height = 720, /**< 720p 1080x720 */
-    M4ENCODER_960_720_Height  = 720, /**< 720p 960x720 */
-    // StageFright encoders require %16 resolution
-    M4ENCODER_1920_1080_Height = 1088 /**< 1080p 1920x1080 */
-/* -CR Google */
-} M4ENCODER_FrameHeight;
-
-/**
- ******************************************************************************
- * enum        M4ENCODER_Bitrate
- * @brief    Thie enum defines the avalaible bitrates.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_k28_KBPS  = 28000,
-    M4ENCODER_k40_KBPS  = 40000,
-    M4ENCODER_k64_KBPS  = 64000,
-    M4ENCODER_k96_KBPS  = 96000,
-    M4ENCODER_k128_KBPS = 128000,
-    M4ENCODER_k192_KBPS = 192000,
-    M4ENCODER_k256_KBPS = 256000,
-    M4ENCODER_k384_KBPS = 384000,
-    M4ENCODER_k512_KBPS = 512000,
-    M4ENCODER_k800_KBPS = 800000
-
-} M4ENCODER_Bitrate;
-
-/* IMAGE STAB */
-
-/**
- ******************************************************************************
- * enum            M4ENCODER_StabMode
- * @brief        The current mode of the stabilization filter.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kStabOff = 0,        /**< stabilization filter is disabled */
-    M4ENCODER_kStabCentered,    /**< stabilization filter is enabled. */
-                                /**< Video input and output must have the same dimensions. Output
-                                    image will have black borders */
-    M4ENCODER_kStabGrabMore        /**< stabilization filter is enabled. */
-                                /**< Video input dimensions must be bigger than output. The ratio
-                                        is indicated by M4ENCODER_STAB_FILTER_CROP_PERCENTAGE */
-
-} M4ENCODER_StabMode;
-
-/**
- ******************************************************************************
- * enum            M4ENCODER_FrameMode
- * @brief        Values to drive the encoder behaviour (type of frames produced)
- ******************************************************************************
-*/
-typedef enum
-{
-    M4ENCODER_kNormalFrame = 0,   /**< let the encoder decide which type of frame to encode */
-    M4ENCODER_kLastFrame   = 1,   /**< force encoder the flush all its buffers because it is
-                                         last frame  */
-    M4ENCODER_kIFrame      = 2    /**< force encoder to generate an I frame */
-
-} M4ENCODER_FrameMode;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_Params
- * @brief    This structure defines all the settings avalaible when encoding.
- ******************************************************************************
-*/
-typedef struct
-{
-    /* Input */
-    M4ENCODER_InputFormat    InputFormat;        /**< Input video format (grabbing) */
-    M4ENCODER_FrameWidth    InputFrameWidth;    /**< Input Frame width (grabbing) */
-    M4ENCODER_FrameHeight    InputFrameHeight;    /**< Input Frame height (grabbing) */
-
-    /* Output */
-    M4ENCODER_FrameWidth    FrameWidth;            /**< Frame width  */
-    M4ENCODER_FrameHeight    FrameHeight;        /**< Frame height  */
-    M4ENCODER_Bitrate        Bitrate;            /**< Bitrate, see enum  */
-    M4ENCODER_FrameRate        FrameRate;            /**< Framerate, see enum  */
-    M4ENCODER_Format        Format;                /**< Video compression format, H263, MPEG4,
-                                                         MJPEG ...  */
-    M4OSA_Int32            videoProfile; /** video profile */
-    M4OSA_Int32            videoLevel;   /** video level */
-} M4ENCODER_Params;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_AdvancedParams
- * @brief    This structure defines the advanced settings available for MPEG-4 encoding.
- ******************************************************************************
-*/
-typedef struct
-{
-    /**
-     * Input parameters (grabber coupled with encoder): */
-    M4ENCODER_InputFormat    InputFormat;                /**< Input video format */
-    M4ENCODER_FrameWidth    InputFrameWidth;            /**< Input Frame width */
-    M4ENCODER_FrameHeight    InputFrameHeight;            /**< Input Frame height */
-
-    /**
-     * Common settings for H263 and MPEG-4: */
-    M4ENCODER_FrameWidth    FrameWidth;                    /**< Frame width  */
-    M4ENCODER_FrameHeight    FrameHeight;                /**< Frame height  */
-    M4OSA_UInt32            Bitrate;                    /**< Free value for the bitrate */
-    /**< Framerate (if set to M4ENCODER_kUSE_TIMESCALE use uiRateFactor & uiTimeScale instead) */
-    M4ENCODER_FrameRate        FrameRate;
-    /**< Video compression format: H263 or MPEG4 */
-    M4ENCODER_Format        Format;
-    M4OSA_Int32            videoProfile; /** output video profile */
-    M4OSA_Int32            videoLevel;   /** output video level */
-    M4OSA_UInt32            uiHorizontalSearchRange; /**< Set to 0 will use default value (15) */
-    M4OSA_UInt32            uiVerticalSearchRange;   /**< Set to 0 will use default value (15) */
-    /**< Set to 0 will use default value (0x7FFF i.e. let engine decide when to put an I) */
-    M4OSA_UInt32            uiStartingQuantizerValue;
-    /**< Enable if priority is quality, Disable if priority is framerate */
-    M4OSA_Bool                bInternalRegulation;
-    /**< Ratio between the encoder frame rate and the actual frame rate */
-    M4OSA_UInt8                uiRateFactor;
-    /**< I frames periodicity, set to 0 will use default value */
-    M4OSA_UInt32            uiIVopPeriod;
-    /**< Motion estimation [default=0 (all tools), disable=8 (no tool)] */
-    M4OSA_UInt8             uiMotionEstimationTools;
-
-    /**
-     * Settings for MPEG-4 only: */
-    M4OSA_UInt32            uiTimeScale;                /**< Free value for the timescale */
-    M4OSA_Bool                bErrorResilience;           /**< Disabled by default */
-    /**< Disabled by default (if enabled, bErrorResilience should be enabled too!) */
-    M4OSA_Bool                bDataPartitioning;
-    M4OSA_Bool              bAcPrediction;           /**< AC prediction [default=1, disable=0] */
-
-} M4ENCODER_AdvancedParams;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_StillPictureParams
- * @brief    This structure defines all the settings avalaible when encoding still
- *            picture.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4ENCODER_FrameWidth    FrameWidth;            /**< Frame width  */
-    M4ENCODER_FrameHeight    FrameHeight;        /**< Frame height  */
-    M4OSA_UInt32            Quality;            /**< Bitrate, see enum  */
-    M4ENCODER_Format        InputFormat;        /**< YUV 420 or 422  */
-    M4ENCODER_Format        Format;                /**< Video compression format, H263, MPEG4,
-                                                         MJPEG ...  */
-    M4OSA_Bool                PreProcessNeeded;    /**< Is the call to the VPP is necessary */
-    M4OSA_Bool                EncodingPerStripes;    /**< Is encoding per stripes */
-
-} M4ENCODER_StillPictureParams;
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_Header
- * @brief    This structure defines the buffer where the sequence header is put.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_MemAddr8    pBuf;        /**< Buffer for the header */
-    M4OSA_UInt32    Size;        /**< Size of the data */
-
-} M4ENCODER_Header;
-
-/**
- ******************************************************************************
- * enum    M4ENCODER_OptionID
- * @brief This enums defines all avalaible options.
- ******************************************************************************
-*/
-typedef enum
-{
-    /**< set the fragment size, option value is M4OSA_UInt32 type */
-    M4ENCODER_kOptionID_VideoFragmentSize    = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
-                                                     M4ENCODER_COMMON, 0x01),
-
-    /**< set the stabilization filtering, option value is M4ENCODER_StabMode type */
-    M4ENCODER_kOptionID_ImageStabilization    = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
-                                                          M4ENCODER_COMMON, 0x02),
-
-    /**< prevent writting of any AU, option value is M4OSA_Bool type */
-    M4ENCODER_kOptionID_InstantStop            = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
-                                                         M4ENCODER_COMMON, 0x03),
-
-    /**< get the DSI (encoder header) generated by the encoder */
-    M4ENCODER_kOptionID_EncoderHeader        = M4OSA_OPTION_ID_CREATE (M4_READ ,\
-                                                             M4ENCODER_COMMON, 0x04),
-/*+ CR LV6775 -H.264 Trimming  */
-
-    M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr= M4OSA_OPTION_ID_CREATE (M4_READ ,\
-                                                             M4ENCODER_COMMON, 0x05),
-    M4ENCODER_kOptionID_H264ProcessNALUContext        = M4OSA_OPTION_ID_CREATE (M4_READ ,\
-                                                             M4ENCODER_COMMON, 0x06)
-/*-CR LV6775 -H.264 Trimming  */
-} M4ENCODER_OptionID;
-
-/*+ CR LV6775 -H.264 Trimming  */
-typedef M4OSA_ERR (H264MCS_ProcessEncodedNALU_fct)(M4OSA_Void*ainstance,M4OSA_UInt8* inbuff,
-                               M4OSA_Int32  inbuf_size,
-                               M4OSA_UInt8 *outbuff, M4OSA_Int32 *outbuf_size);
-//*- CR LV6775 -H.264 Trimming  */
-
-typedef M4OSA_Void* M4ENCODER_Context;
-
-typedef M4OSA_ERR (M4ENCODER_init) (
-        M4ENCODER_Context* pContext,
-        M4WRITER_DataInterface* pWriterDataInterface,
-        M4VPP_apply_fct* pVPPfct,
-        M4VPP_Context pVPPctxt,
-        M4OSA_Void* pExternalAPI,
-        M4OSA_Void* pUserData
-);
-
-typedef M4OSA_ERR (M4ENCODER_open) (
-        M4ENCODER_Context pContext,
-        M4SYS_AccessUnit* pAU,
-        M4OSA_Void* pParams     /* Can be M4ENCODER_Params, M4ENCODER_AdvancedParams or
-                                    M4ENCODER_StillPictureParams */
-);
-
-typedef M4OSA_ERR (M4ENCODER_start) (M4ENCODER_Context pContext);
-typedef M4OSA_ERR (M4ENCODER_stop) (M4ENCODER_Context pContext);
-typedef M4OSA_ERR (M4ENCODER_pause) (M4ENCODER_Context pContext);
-typedef M4OSA_ERR (M4ENCODER_resume) (M4ENCODER_Context pContext);
-typedef M4OSA_ERR (M4ENCODER_close) (M4ENCODER_Context pContext);
-typedef M4OSA_ERR (M4ENCODER_cleanup) (M4ENCODER_Context pContext);
-typedef M4OSA_ERR (M4ENCODER_regulBitRate) (M4ENCODER_Context pContext);
-
-typedef M4OSA_ERR (M4ENCODER_encode) (
-        M4ENCODER_Context pContext,
-        M4VIFI_ImagePlane* pInPlane,
-        M4OSA_Double Cts,
-        M4ENCODER_FrameMode FrameMode
-);
-
-typedef M4OSA_ERR (M4ENCODER_setOption)    (
-        M4ENCODER_Context pContext,
-        M4OSA_UInt32 optionID,
-        M4OSA_DataOption optionValue
-);
-
-typedef M4OSA_ERR (M4ENCODER_getOption)    (
-        M4ENCODER_Context pContext,
-        M4OSA_UInt32 optionID,
-        M4OSA_DataOption optionValue
-);
-
-/**
- ******************************************************************************
- * struct    M4ENCODER_GlobalInterface
- * @brief    Defines all the functions required for an encoder shell.
- ******************************************************************************
-*/
-
-typedef struct _M4ENCODER_GlobalInterface
-{
-    M4ENCODER_init*                pFctInit;
-    M4ENCODER_open*                pFctOpen;
-
-    M4ENCODER_start*            pFctStart;          /* Grabber mode */
-    M4ENCODER_stop*                pFctStop;           /* Grabber mode */
-
-    M4ENCODER_pause*            pFctPause;          /* Grabber mode */
-    M4ENCODER_resume*            pFctResume;         /* Grabber mode */
-
-    M4ENCODER_close*            pFctClose;
-    M4ENCODER_cleanup*            pFctCleanup;
-
-    M4ENCODER_regulBitRate*     pFctRegulBitRate;
-    M4ENCODER_encode*            pFctEncode;         /* Standalone mode */
-
-    M4ENCODER_setOption*        pFctSetOption;
-    M4ENCODER_getOption*        pFctGetOption;
-} M4ENCODER_GlobalInterface;
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4ENCODER_COMMON_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4OSA_CoreID.h b/libvideoeditor/vss/common/inc/M4OSA_CoreID.h
deleted file mode 100755
index 7408fc8..0000000
--- a/libvideoeditor/vss/common/inc/M4OSA_CoreID.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4OSA_CoreID.h
- * @brief  defines the uniques component identifiers used for memory management
- *         and optionID mechanism
- * @note
- *
- ************************************************************************
-*/
-#ifndef __M4OSA_COREID_H__
-#define __M4OSA_COREID_H__
-
-/* CoreId are defined on 14 bits */
-/* we start from 0x0100, lower values are reserved for osal core components */
-
-/* reader shells*/
-#define M4READER_COMMON     0x0100
-#define M4READER_AVI        0x0101
-#define M4READER_AMR        0x0102
-#define M4READER_3GP        0x0103
-#define M4READER_NET        0x0104
-#define M4READER_3GP_HTTP   0x0105
-#define M4READER_MP3        0x0106
-#define M4READER_WAV        0x0107
-#define M4READER_MIDI       0x0108
-#define M4READER_ASF        0x0109
-#define M4READER_REAL        0x010A
-#define M4READER_AAC        0x010B
-#define M4READER_FLEX        0x010C
-#define M4READER_BBA        0x010D
-#define M4READER_SYNTHESIS_AUDIO    0x010E
-#define M4READER_JPEG        0x010F
-
-
-/* writer shells*/
-#define M4WRITER_COMMON     0x0110
-#define M4WRITER_AVI        0x0111
-#define M4WRITER_AMR        0x0112
-#define M4WRITER_3GP        0x0113
-#define M4WRITER_JPEG        0x0116
-#define M4WRITER_MP3        0x0117
-
-/* decoder shells */
-#define M4DECODER_COMMON    0x0120
-#define M4DECODER_JPEG      0x0121
-#define M4DECODER_MPEG4     0x0122
-#define M4DECODER_AUDIO     0x0123
-#define M4DECODER_AVC       0x0124
-#define M4DECODER_MIDI      0x0125
-#define M4DECODER_WMA        0x0126
-#define M4DECODER_WMV        0x0127
-#define M4DECODER_RMV        0x0128
-#define M4DECODER_RMA        0x0129
-#define M4DECODER_AAC       0x012A
-#define M4DECODER_BEATBREW  0x012B
-#define M4DECODER_EXTERNAL  0x012C
-
-/* encoder shells */
-#define M4ENCODER_COMMON    0x0130
-#define M4ENCODER_JPEG      0x0131
-#define M4ENCODER_MPEG4     0x0132
-#define M4ENCODER_AUDIO     0x0133
-#define M4ENCODER_VID_NULL  0x0134
-#define M4ENCODER_MJPEG        0x0135
-#define M4ENCODER_MP3        0x0136
-#define M4ENCODER_H264        0x0137
-#define M4ENCODER_AAC        0x0138
-#define M4ENCODER_AMRNB        0x0139
-#define M4ENCODER_AUD_NULL  0x013A
-#define M4ENCODER_EXTERNAL  0x013B
-
-/* cores */
-#define M4JPG_DECODER       0x0140
-#define M4JPG_ENCODER       0x0141
-
-#define M4MP4_DECODER       0x0142
-#define M4MP4_ENCODER       0x0143
-
-#define M4AVI_COMMON        0x0144
-#define M4AVI_READER        0x0145
-#define M4AVI_WRITER        0x0146
-
-#define M4HTTP_ENGINE       0x0147
-
-#define M4OSA_TMPFILE       0x0148
-#define M4TOOL_TIMER        0x0149
-
-#define M4AMR_READER        0x014A
-
-#define M4MP3_READER        0x014B
-
-#define M4WAV_READER        0x014C
-#define M4WAV_WRITER        0x014D
-#define M4WAV_COMMON        0x014E
-
-#define M4ADTS_READER        0x014F
-#define M4ADIF_READER        0x016A
-
-#define M4SPS               0x0150
-#define M4EXIF_DECODER      0x0151
-#define M4EXIF_ENCODER      0x0152
-#define M4GIF_DECODER       0x0153
-#define M4GIF_ENCODER       0x0154
-#define M4PNG_DECODER       0x0155
-#define M4PNG_ENCODER       0x0156
-#define M4WBMP_DECODER      0x0157
-#define M4WBMP_ENCODER      0x0158
-
-#define M4AMR_WRITER        0x0159    /**< no room to put it along M4AMR_READER */
-
-
-#define M4AVC_DECODER       0x015A
-#define M4AVC_ENCODER       0x015B
-
-#define M4ASF_READER        0x015C
-#define M4WMDRM_AGENT        0x015D
-#define M4MIDI_READER        0x0162    /**< no room before the presenters */
-#define M4RM_READER         0x163
-#define M4RMV_DECODER        0x164
-#define M4RMA_DECODER        0x165
-
-#define M4TOOL_XML            0x0166
-#define M4TOOL_EFR            0x0167    /**< Decryption module for Video Artist */
-#define M4IAL_FTN            0x0168    /* FTN implementation of the IAL */
-#define M4FTN                0x0169    /* FTN library */
-
-/* presenter */
-#define M4PRESENTER_AUDIO   0x0160
-#define M4PRESENTER_VIDEO   0x0161
-
-/* high level interfaces (vps, etc..)*/
-#define M4VPS               0x0170
-#define M4VTS               0x0171
-#define M4VXS               0x0172
-#define M4CALLBACK          0x0173
-#define M4VES               0x0174
-#define M4PREPROCESS_VIDEO  0x0175
-#define M4GRAB_AUDIO        0x0176
-#define M4GRAB_VIDEO        0x0177
-#define M4VSSAVI            0x0178
-#define M4VSS3GPP           0x0179
-#define M4PTO3GPP           0x017A
-#define M4PVX_PARSER        0x017B
-#define M4VCS                0x017C
-#define M4MCS                0x017D
-#define M4MNMC                0x0180    /**< mnm controller */
-#define M4TTEXT_PARSER      0x0181    /**< timed text */
-#define M4MM                0x0182    /**< Music manager */
-#define M4MDP                0x0183    /**< Metadata parser */
-#define M4MMSQLCORE            0x0184
-#define M4VPSIL                0x0185
-#define M4FILEIL            0x0186 /* IL file Interface */
-#define M4MU                0x0187
-#define M4VEE                0x0188  /**< Video effect engine */
-#define M4VA                0x0189 /* VideoArtist */
-#define M4JTS                0x018A
-#define M4JTSIL                0x018B
-#define M4AIR                0x018C  /**< AIR */
-#define M4SPE                0x018D  /**< Still picture editor */
-#define M4VS                0x018E    /**< Video Studio (xVSS) */
-#define M4VESIL                0x018F    /**< VES il */
-#define M4ID3                0x0190    /**< ID3 Tag Module */
-#define M4SC                0x0191    /**< Media Scanner */
-#define M4TG                0x0192  /**< Thumbnail Generator*/
-#define M4TS                0x0193    /**< Thumbnail storage */
-#define M4MB                0x0194    /**< Media browser */
-
-/* high level application (test or client app) */
-#define M4APPLI             0x0200
-#define M4VA_APPLI            0x0201    /**< Video Artist test application */
-
-/* external components (HW video codecs, etc.) */
-#define M4VD_EXTERNAL        0x0300
-#define M4VE_EXTERNAL        0x0301
-
-
-/* priority to combine with module ids */
-#define M4HIGH_PRIORITY     0xC000
-#define M4MEDIUM_PRIORITY   0x8000
-#define M4LOW_PRIORITY      0x4000
-#define M4DEFAULT_PRIORITY  0x0000
-
-
-#endif /*__M4OSA_COREID_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h b/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h
deleted file mode 100755
index 6afc50c..0000000
--- a/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file    M4WAV_WavReader.h
- * @brief   WAV Reader declarations
- * @note    This file implements functions of the WAV reader
- ************************************************************************
-*/
-
-#include "M4OSA_CoreID.h"
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_FileReader.h"
-#include "M4SYS_AccessUnit.h"
-#include "M4TOOL_VersionInfo.h"
-
-
-#define M4PCMC_ERR_PCM_NOT_COMPLIANT    M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000001)
-#define M4PCMC_ERR_PCM_NO_SPACE_AVAIL   M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000002)
-#define M4PCMC_ERR_PCM_NOT_SUPPORTED    M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000003)
-
-#define M4PCMC_WAR_END_OF_STREAM        M4OSA_ERR_CREATE(M4_WAR, M4WAV_COMMON ,0x000001)
-
-/**
- ************************************************************************
- * structure    M4WAVC_DecoderSpecificInfo
- * @brief       This structure defines the decoder Specific informations
- * @note        This structure is used by the WAV reader to store all
- *              decoder specific informations:
- *              - Sample Frequency
- *              - Average Bytes per second
- *              - Number of channels (1 or 2)
- *              - Number of bits per sample (8 or 16)
- ************************************************************************
-*/
-typedef struct {
-    M4OSA_UInt32    SampleFrequency;
-    M4OSA_UInt32    AvgBytesPerSec;
-    M4OSA_UInt32    DataLength;
-    M4OSA_UInt16    nbChannels;
-    M4OSA_UInt16    BitsPerSample;
-} M4PCMC_DecoderSpecificInfo;
-
-/**
- ************************************************************************
- * enum     M4WAVR_State
- * @brief   This enum defines the WAV Reader States
- * @note    The state automaton is documented separately
- *          consult the design specification for details
- ************************************************************************
-*/
-typedef enum {
-    M4PCMR_kInit    = 0x0000,
-    M4PCMR_kOpening = 0x0100,
-    M4PCMR_kOpening_streamRetrieved = 0x0101,
-    M4PCMR_kReading = 0x0200,
-    M4PCMR_kReading_nextAU  = 0x0201,
-    M4PCMR_kClosed  = 0x0300
-} M4PCMR_State;
-
-/**
- ************************************************************************
- * enum     M4WAVR_OptionID
- * @brief   This enum defines the WAV Reader options
- * @note    Only one option is available:
- *          - M4WAVR_kPCMblockSize: sets the size of the PCM block to read
- *            from WAV file
- ************************************************************************
-*/
-typedef enum {
-    M4PCMR_kPCMblockSize    = M4OSA_OPTION_ID_CREATE(M4_READ, M4WAV_READER, 0x01)
-} M4PCMR_OptionID;
-
-/**
- ************************************************************************
- * structure    M4WAVR_Context
- * @brief       This structure defines the WAV Reader context
- * @note        This structure is used for all WAV Reader calls to store
- *              the context
- ************************************************************************
-*/
-typedef struct {
-    M4OSA_MemAddr32             m_pDecoderSpecInfo;/**< Pointer to the decoder specific info
-                                                        structure contained in pStreamDesc
-                                                        (only used to free...) */
-    M4OSA_FileReadPointer*      m_pFileReadFunc;/**< The OSAL set of pointer to function for
-                                                         file management */
-    M4OSA_Context               m_fileContext;  /**< The context needed by OSAL to manage File */
-    M4PCMC_DecoderSpecificInfo  m_decoderConfig;/**< Specific configuration for decoder */
-    M4PCMR_State                m_state;        /**< state of the wav reader */
-    M4PCMR_State                m_microState;   /**< state of the read wav stream */
-    M4OSA_UInt32                m_blockSize;    /**< Size of the read block */
-    M4OSA_UInt32                m_offset;       /**< Offset of the PCM read (i.e m_offset of the
-                                                        file without wav header) */
-    M4OSA_MemAddr32             m_pAuBuffer;    /**< Re-used buffer for AU content storage */
-    M4OSA_FilePosition          m_dataStartOffset;/**< offset of the pcm data beginning into
-                                                         the file */
-} M4PCMR_Context;
-
-/*************************************************************************
- *
- *  Prototypes of all WAV reader functions
- *
- ************************************************************************/
-M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
-                             M4OSA_FileReadPointer* pFileFunction);
-M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc);
-M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs);
-M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU);
-M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU);
-M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
-                         M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS);
-M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context);
-M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
-                              M4OSA_DataOption* pValue);
-M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
-                              M4OSA_DataOption Value);
-M4OSA_ERR M4PCMR_getVersion(M4_VersionInfo *pVersion);
diff --git a/libvideoeditor/vss/common/inc/M4READER_3gpCom.h b/libvideoeditor/vss/common/inc/M4READER_3gpCom.h
deleted file mode 100755
index 22a5a03..0000000
--- a/libvideoeditor/vss/common/inc/M4READER_3gpCom.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file    M4READER_3gpCom.h
- * @brief    Generic encapsulation of the core 3gp reader
- * @note    This file declares the generic shell interface retrieving function
- *            of the 3GP reader
- ************************************************************************
-*/
-
-#ifndef __M4READER_3GPCOM_H__
-#define __M4READER_3GPCOM_H__
-
-#include "NXPSW_CompilerSwitches.h"
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4READER_Common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Error: Function M4READER_Com3GP_getNextStreamHandler must be called before.
- */
-#define M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET        M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000001)
-
-/**
- * Error: No video stream H263 in file.
- */
-#define M4ERR_VIDEO_NOT_H263                    M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000002)
-
-/**
- * There has been a problem with the decoder configuration information, seems to be invalid */
-#define M4ERR_READER3GP_DECODER_CONFIG_ERROR    M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000003)
-
-#define M4READER_COM3GP_MAXVIDEOSTREAM  5
-#define M4READER_COM3GP_MAXAUDIOSTREAM  5
-#define M4READER_COM3GP_MAXTEXTSTREAM   5
-
-typedef struct
-{
-    M4OSA_Context                m_pFFContext;    /**< core file format context */
-
-    M4_StreamHandler*            m_AudioStreams[M4READER_COM3GP_MAXAUDIOSTREAM];
-    M4_StreamHandler*            m_pAudioStream;    /**< pointer to the current allocated audio
-                                                            stream handler */
-
-    M4_StreamHandler*            m_VideoStreams[M4READER_COM3GP_MAXVIDEOSTREAM];
-    M4_StreamHandler*            m_pVideoStream;    /**< pointer to the current allocated video
-                                                            stream handler */
-
-#ifdef M4VPS_SUPPORT_TTEXT
-    M4_StreamHandler*            m_TextStreams[M4READER_COM3GP_MAXTEXTSTREAM];
-    M4_StreamHandler*            m_pTextStream;    /**< pointer to the current allocated text
-                                                            stream handler */
-#endif /*M4VPS_SUPPORT_TTEXT*/
-
-} M4READER_Com3GP_Context;
-
-/**
- ************************************************************************
- * structure M4READER_3GP_Buffer (but nothing specific to 3GP, nor to a reader !)
- * @brief     This structure defines a buffer that can be used to exchange data (should be in OSAL)
- ************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt32    size;            /**< the size in bytes of the buffer */
-    M4OSA_MemAddr8    dataAddress;    /**< the pointer to the buffer */
-} M4READER_3GP_Buffer;
-
-/**
- ************************************************************************
- * enum     M4READER_3GP_OptionID
- * @brief    This enum defines the reader options specific to the 3GP format.
- * @note    These options can be read from or written to a 3GP reader via M4READER_3GP_getOption.
- ************************************************************************
-*/
-typedef enum
-{
-    /**
-     * Get the DecoderConfigInfo for H263,
-     * option value must be a pointer to M4READER_3GP_H263Properties allocated by caller */
-    M4READER_3GP_kOptionID_H263Properties = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x01),
-
-    /**
-     * Get the Purple Labs drm information */
-    M4READER_3GP_kOptionID_PurpleLabsDrm = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x02),
-
-    /**
-     * Set the Fast open mode (Only the first AU of each stream will be parsed -> less CPU,
-                                 less RAM). */
-    M4READER_3GP_kOptionID_FastOpenMode = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x03),
-
-    /**
-     * Set the Audio only mode (the video stream won't be opened) */
-    M4READER_3GP_kOptionID_AudioOnly = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x04),
-
-    /**
-     * Set the Video only mode (the audio stream won't be opened) */
-    M4READER_3GP_kOptionID_VideoOnly = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x05),
-
-    /**
-     * Get the next video CTS */
-    M4READER_3GP_kOptionID_getNextVideoCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x06)
-
-} M4READER_3GP_OptionID;
-
-
-/**
- ************************************************************************
- * struct    M4READER_3GP_H263Properties
- * @brief    Contains info about H263 stream read from the 3GP file.
- ************************************************************************
-*/
-typedef struct
-{
-    /**< the profile as defined in the Visual Object Sequence header, if present */
-    M4OSA_UInt8        uiProfile;
-    /**< the level as defined in the Visual Object Sequence header, if present */
-    M4OSA_UInt8        uiLevel;
-
-} M4READER_3GP_H263Properties;
-
-/**
- ************************************************************************
- * @brief    Get the next stream found in the 3gp file
- * @note
- * @param    pContext:        (IN)    Context of the reader
- * @param    pMediaFamily:    (OUT)    Pointer to a user allocated M4READER_MediaFamily that will
- *                                      be filled with the media family of the found stream
- * @param    pStreamHandler:    (OUT)    Pointer to a stream handler that will be allocated and
- *                                          filled with the found stream description
- * @return    M4NO_ERROR                 There is no error
- * @return    M4ERR_PARAMETER            At least one parameter is not properly set
- * @return    M4WAR_NO_MORE_STREAM    No more available stream in the media (all streams found)
- ************************************************************************
-*/
-M4OSA_ERR M4READER_Com3GP_getNextStreamHandler(M4OSA_Context context,
-                                                 M4READER_MediaFamily *pMediaFamily,
-                                                 M4_StreamHandler **pStreamHandler);
-
-/**
- ************************************************************************
- * @brief    Prepare the  access unit (AU)
- * @note    An AU is the smallest possible amount of data to be decoded by a decoder.
- * @param    pContext:        (IN)        Context of the reader
- * @param    pStreamHandler    (IN)        The stream handler of the stream to make jump
- * @param    pAccessUnit        (IN/OUT)    Pointer to an access unit to fill with read data
- *                                          (the au structure is allocated by the user, and must
- *                                          be initialized by calling M4READER_fillAuStruct_fct
- *                                          after creation)
- * @return    M4NO_ERROR                     There is no error
- * @return    M4ERR_PARAMETER                At least one parameter is not properly set
- * @returns    M4ERR_ALLOC                    Memory allocation failed
- ************************************************************************
-*/
-M4OSA_ERR M4READER_Com3GP_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-                                         M4_AccessUnit *pAccessUnit);
-
-/**
- ************************************************************************
- * @brief    Cleans up the stream handler
- * @param    pContext: (IN/OUT) Context of the reader shell
- * @param    pStreamHandler: (IN/OUT) Stream handler
- * @return    M4ERR_PARAMETER:    The context is null
- * @return    M4NO_ERROR:            No error
- ************************************************************************
-*/
-M4OSA_ERR M4READER_Com3GP_cleanUpHandler(M4_StreamHandler* pStreamHandler);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __M4READER_3GPCOM_H__ */
-
diff --git a/libvideoeditor/vss/common/inc/M4READER_Amr.h b/libvideoeditor/vss/common/inc/M4READER_Amr.h
deleted file mode 100755
index b6e7f97..0000000
--- a/libvideoeditor/vss/common/inc/M4READER_Amr.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ************************************************************************
- * @file   M4READER_Amr.h
- * @brief  Generic encapsulation of the core amr reader
- * @note   This file declares the generic shell interface retrieving function
- *         of the AMR reader
- ************************************************************************
-*/
-#ifndef __M4READER_AMR_H__
-#define __M4READER_AMR_H__
-
-#include "M4READER_Common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
-*************************************************************************
-* @brief Retrieves the generic interfaces implemented by the reader
-*
-* @param pMediaType             : Pointer on a M4READER_MediaType (allocated by the caller)
-*                              that will be filled with the media type supported by this reader
-* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
-*                              implemented by this reader. The interface is a structure allocated
-*                              by the function and must be un-allocated by the caller.
-* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
-*                              implemented by this reader. The interface is a structure allocated
-*                              by the function and must be un-allocated by the caller.
-*
-* @returns : M4NO_ERROR     if OK
-*             ERR_ALLOC      if an allocation failed
-*            ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
-*************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
-                                      M4READER_GlobalInterface **pRdrGlobalInterface,
-                                      M4READER_DataInterface **pRdrDataInterface);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4READER_AMR_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4READER_Common.h b/libvideoeditor/vss/common/inc/M4READER_Common.h
deleted file mode 100755
index 8863a7e..0000000
--- a/libvideoeditor/vss/common/inc/M4READER_Common.h
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4READER_Common.h
- * @brief  Shell Reader common interface declaration
- * @note   This file declares the common interfaces that reader shells must implement
- *
- ************************************************************************
-*/
-#ifndef __M4READER_COMMON_H__
-#define __M4READER_COMMON_H__
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_FileReader.h"
-#include "M4OSA_CoreID.h"
-#include "M4DA_Types.h"
-#include "M4Common_types.h"
-
-/* ERRORS */
-#define M4ERR_READER_UNKNOWN_STREAM_TYPE        M4OSA_ERR_CREATE(M4_ERR, M4READER_COMMON, 0x0001)
-
-/* WARNINGS */
-#define M4WAR_READER_NO_METADATA                M4OSA_ERR_CREATE(M4_WAR, M4READER_COMMON, 0x0001)
-#define M4WAR_READER_INFORMATION_NOT_PRESENT    M4OSA_ERR_CREATE(M4_WAR, M4READER_COMMON, 0x0002)
-
-
-/**
- ************************************************************************
- * enum        M4READER_MediaType
- * @brief    This enum defines the Media types used to create media readers
- * @note    This enum is used internally by the VPS to identify a currently supported
- *          media reader interface. Each reader is registered with one of this type associated.
- *          When a reader instance is needed, this type is used to identify and
- *          and retrieve its interface.
- ************************************************************************
-*/
-typedef enum
-{
-    M4READER_kMediaTypeUnknown        = -1,    /**< Unknown media type */
-    M4READER_kMediaType3GPP            = 0,    /**< 3GPP file media type */
-    M4READER_kMediaTypeAVI            = 1,    /**< AVI file media type */
-    M4READER_kMediaTypeAMR            = 2,    /**< AMR file media type */
-    M4READER_kMediaTypeMP3            = 3,    /**< MP3 file media type */
-    M4READER_kMediaTypeRTSP            = 4,    /**< RTSP network accessed media type */
-    M4READER_kMediaType3GPPHTTP        = 5,    /**< Progressively downloaded 3GPP file media type */
-    M4READER_kMediaTypePVHTTP        = 6,    /**< Packet Video HTTP proprietary type */
-    M4READER_kMediaTypeWAV            = 7,    /**< WAV file media type */
-    M4READER_kMediaType3GPEXTHTTP    = 8,    /**< An external progressively downloaded 3GPP file
-                                                     media type */
-    M4READER_kMediaTypeAAC            = 9,    /**< ADTS and ADIF AAC support */
-    M4READER_kMediaTypeREAL            = 10,    /**< REAL Media type */
-    M4READER_kMediaTypeASF            = 11,    /**< ASF Media type */
-    M4READER_kMediaTypeFLEXTIME        = 12,    /**< FlexTime Media type */
-    M4READER_kMediaTypeBBA            = 13,    /**< Beatbrew audio Media type */
-    M4READER_kMediaTypeSYNTHAUDIO    = 14,    /**< Synthesis audio Media type */
-    M4READER_kMediaTypePCM            = 15,    /**< PCM Media type */
-    M4READER_kMediaTypeJPEG            = 16,    /**< JPEG Media type */
-    M4READER_kMediaTypeGIF            = 17,    /**< GIF Media type */
-    M4READER_kMediaTypeADIF            = 18,    /**< AAC-ADTS Media type */
-    M4READER_kMediaTypeADTS            = 19,    /**< AAC-ADTS Media type */
-
-    M4READER_kMediaType_NB  /* number of readers, keep it as last enum entry */
-
-} M4READER_MediaType;
-
-/**
- ************************************************************************
- * enum        M4READER_MediaFamily
- * @brief    This enum defines the Media family of a stream
- * @note    This enum is used internally by the VPS to identify what kind of stream
- *          has been retrieved via getNextStream() function.
- ************************************************************************
-*/
-typedef enum
-{
-    M4READER_kMediaFamilyUnknown   = -1,
-    M4READER_kMediaFamilyVideo     = 0,
-    M4READER_kMediaFamilyAudio     = 1,
-    M4READER_kMediaFamilyText      = 2
-} M4READER_MediaFamily;
-
-
-
-/**
- ************************************************************************
- * enum        M4READER_OptionID
- * @brief    This enum defines the reader options
- * @note    These options can be read from a reader via M4READER_getOption_fct
- ************************************************************************
-*/
-typedef enum
-{
-    /**
-    Get the duration of the movie (in ms)
-    */
-    M4READER_kOptionID_Duration = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0),
-
-    /**
-    Get the version of the core reader
-    */
-    M4READER_kOptionID_Version  = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 1),
-
-    /**
-    Get the copyright from the media (if present)
-    (currently implemented for 3GPP only: copyright get from the cprt atom in the udta if present)
-    */
-    M4READER_kOptionID_Copyright= M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 2),
-
-
-    /**
-    Set the OSAL file reader functions to the reader (type of value: M4OSA_FileReadPointer*)
-    */
-    M4READER_kOptionID_SetOsaFileReaderFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                     M4READER_COMMON, 3),
-
-    /**
-    Set the OSAL file writer functions to the reader (type of value: M4OSA_FileWriterPointer*)
-    */
-    M4READER_kOptionID_SetOsaFileWriterFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                     M4READER_COMMON, 4),
-
-    /**
-    Set the OSAL file writer functions to the reader (type of value: M4OSA_NetFunction*)
-    */
-    M4READER_kOptionID_SetOsaNetFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 5),
-
-    /**
-    Creation time in sec. since midnight, Jan. 1, 1970 (type of value: M4OSA_UInt32*)
-    (available only for 3GPP content, including PGD)
-    */
-    M4READER_kOptionID_CreationTime = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 6),
-
-    /**
-    Bitrate in bps (type of value: M4OSA_Double*)
-    */
-    M4READER_kOptionID_Bitrate = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 7),
-
-    /**
-    Tag ID3v1 of MP3 source (type of value: M4MP3R_ID3Tag*)
-    */
-    M4READER_kOptionID_Mp3Id3v1Tag = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 8),
-
-    /**
-    Tag ID3v2 of MP3 source (type of value: M4MP3R_ID3Tag*)
-    */
-    M4READER_kOptionID_Mp3Id3v2Tag = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 9),
-
-    /**
-    Number of Access Unit in the Audio stream (type of value: M4OSA_UInt32*)
-    */
-    M4READER_kOptionID_GetNumberOfAudioAu = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xA),
-
-    /**
-    Number of frames per bloc
-    */
-    M4READER_kOptionID_GetNbframePerBloc    = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                             M4READER_COMMON, 0xB),
-
-    /**
-    Flag for protection presence
-    */
-    M4READER_kOptionID_GetProtectPresence    = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                             M4READER_COMMON, 0xC),
-
-    /**
-    Set DRM Context
-    */
-    M4READER_kOptionID_SetDRMContext    = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xD),
-
-    /**
-    Get ASF Content Description Object
-    */
-    M4READER_kOptionID_ContentDescription = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xE),
-
-    /**
-    Get ASF Content Description Object
-    */
-    M4READER_kOptionID_ExtendedContentDescription = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                             M4READER_COMMON, 0xF),
-
-    /**
-    Get Asset 3gpp Fields
-    */
-    M4READER_kOptionID_3gpAssetFields = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x10),
-
-    /**
-    Set the max metadata size supported in the reader
-    Only relevant in 3gp parser till now, but can be used for other readers
-    */
-    M4READER_kOptionID_MaxMetadataSize = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_COMMON, 0x11),
-
-    M4READER_kOptionID_GetMetadata = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x12),
-    /**
-    Get 3gpp 'ftyp' atom
-    */
-    M4READER_kOptionID_3gpFtypBox  = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x13),
-
-
-    /* value is M4OSA_Bool* */
-    /* return the drm protection status of the file*/
-    M4READER_kOptionID_isProtected = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x14),
-
-    /* value is a void* */
-    /* return the aggregate rights of the file*/
-    /* The buffer must be allocated by the application and must be big enough*/
-    /* By default, the size for WMDRM is 76 bytes */
-    M4READER_kOptionID_getAggregateRights = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x15),
-    /**
-    Get ASF Content Description Object
-    */
-    M4READER_kOptionID_ExtendedContentEncryption = M4OSA_OPTION_ID_CREATE(M4_READ,\
-                                                         M4READER_COMMON, 0x16),
-
-    /**
-    Number of Access Unit in the Video stream (type of value: M4OSA_UInt32*)
-    */
-    M4READER_kOptionID_GetNumberOfVideoAu = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x17),
-
-    /**
-    Chunk mode activation  size in case of JPG reader */
-    M4READER_kOptionID_JpegChunckSize = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x18),
-
-    /**
-    Check if ASF file contains video */
-    M4READER_kOptionID_hasVideo = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x19),
-
-    /**
-     Set specific read mode for Random Access JPEG */
-    M4READER_kOptionID_JpegRAMode = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_COMMON, 0x20),
-
-    /**
-    Get Thumbnail buffer in case of JPG reader */
-    M4READER_kOptionID_JpegThumbnail = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x21),
-
-    /**
-    Get FPDATA buffer in case of JPG reader */
-    M4READER_kOptionID_JpegFPData = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x22),
-
-    /**
-    Get JPEG info (progressive, subsampling) */
-    M4READER_kOptionID_JpegInfo= M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x23)
-
-
-/*****************************************/
-} M4READER_OptionID;
-/*****************************************/
-
-/**
- ************************************************************************
- * structure    M4READER_CopyRight
- * @brief        This structure defines a copyRight description
- * @note        This structure is used to retrieve the copyRight of the media
- *              (if present) via the getOption() function
- ************************************************************************
-*/
-typedef struct _M4READER_CopyRight
-{
-    /**
-    Pointer to copyright data (allocated by user)
-    */
-    M4OSA_UInt8*   m_pCopyRight;
-
-    /**
-    Pointer to copyright size. The pCopyRightSize must
-    be Initialized with the size available in the pCopyRight buffer
-    */
-    M4OSA_UInt32   m_uiCopyRightSize;
-
-} M4READER_CopyRight;
-
-
-
-/**
- ************************************************************************
- * structure    M4READER_StreamDataOption
- * @brief        This structure defines a generic stream data option
- * @note        It is used is used to set or get a stream specific data defined
- *              by a relevant reader option ID.
- ************************************************************************
-*/
-typedef struct _M4READER_StreamDataOption
-{
-    M4_StreamHandler*     m_pStreamHandler; /**< identifier of the stream */
-    M4OSA_Void*           m_pOptionValue;   /**< value of the data option to get or to set */
-
-} M4READER_StreamDataOption;
-
-/**
- ************************************************************************
- * enumeration    M4_EncodingFormat
- * @brief        Text encoding format
- ************************************************************************
-*/
-// typedef enum
-// {
-//     M4_kEncFormatUnknown    = 0,    /**< Unknown format                                    */
-//     M4_kEncFormatASCII        = 1,  /**< ISO-8859-1. Terminated with $00                   */
-//     M4_kEncFormatUTF8        = 2,   /**< UTF-8 encoded Unicode . Terminated with $00       */
-//     M4_kEncFormatUTF16        = 3   /**< UTF-16 encoded Unicode. Terminated with $00 00    */
-/*}  M4_EncodingFormat;*/
-
-/**
- ************************************************************************
- * structure    M4_StringAttributes
- * @brief        This structure defines string attribute
- ************************************************************************
-*/
-// typedef struct
-// {
-//     M4OSA_Void*            m_pString;            /**< Pointer to text        */
-//     M4OSA_UInt32        m_uiSize;            /**< Size of text            */
-//     M4_EncodingFormat    m_EncodingFormat;    /**< Text encoding format    */
-// } M4_StringAttributes;
-
-
-/**
- ************************************************************************
- * structure    M4READER_Buffer
- * @brief        This structure defines a buffer in all readers
- ************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt8*   m_pData;
-    M4OSA_UInt32   m_uiBufferSize;
-} M4READER_Buffer;
-
-typedef struct
-{
-     M4OSA_UInt32            m_uiSessionId;
-    M4OSA_UInt32            m_uiMediaId;
-    M4OSA_UInt32            m_uiNbInstance;
-    M4OSA_Char**            m_pInstance;
-} M4_SdpAssetInstance;
-/*
-typedef enum
-{
-     M4READER_kUnknownFormat    = 0,
-     M4READER_kTagID3V1,
-     M4READER_kTagID3V2,
-    M4READER_kASFContentDesc,
-    M4READER_k3GppAssetBoxFromUDTA,
-    M4READER_k3GppAssetBoxFromSDP,
-    M4READER_kJpegExif
-} M4READER_MetaDataType;*/
-
-
-/**
- ************************************************************************
- * structure    M4_3gpAssetFields
- * @brief        This structure defines fields of a 3gpp asset information
- ************************************************************************
-*/
-typedef struct
-{
-    M4COMMON_MetaDataFields    m_metadata;
-
-    M4OSA_UInt32            m_uiSessionID;    /* For SDP */
-    M4OSA_UInt32            m_uiMediaID;    /* For SDP */
-
-
-    /* Note: The two following fields were added for internal use
-        (For Music manager project..) !! */
-    M4_StreamType       m_VideoStreamType;    /**< Video stream type */
-    M4_StreamType       m_AudioStreamType;    /**< Audio stream type */
-
-} M4_MetaDataFields;
-
-
-#define M4_METADATA_STR_NB    22 /* one string in album art structure*/
-
-typedef struct
-{
-    M4OSA_UInt32            m_uiNbBuffer;
-    M4_SdpAssetInstance*    m_pAssetInfoInst;    /* Set of 3gpp asset boxes */
-    M4COMMON_MetaDataAlbumArt        m_albumArt;            /* RC: PV specific album art:added
-                                                               here because this type is used by
-                                                               union below in streaming */
-
-} M4READER_netInfos;
-
-
-typedef union
-{
-    M4READER_Buffer        m_pTagID3Buffer[2];        /* Tag ID3 V1, V2 */
-    struct
-    {
-        M4READER_Buffer        m_pAsfDescContent;    /* ASF description content buffer */
-        M4READER_Buffer        m_pAsfExtDescContent; /* ASF extended description content buffer */
-    } m_asf;
-    M4_MetaDataFields    m_pMetadataFields;      /* Already parsed and filled 3gpp asset fields */
-    M4READER_netInfos    m_pAssetInfoInstance;   /* Set of 3gpp asset boxes in the sdp file */
-
-} M4_MetadataBuffer;
-
-
-
-
-/*********** READER GLOBAL Interface ************************************/
-
-/**
- ************************************************************************
- * @brief    create an instance of the reader
- * @note    create the context
- * @param    pContext:            (OUT)    pointer on a reader context
- * @return    M4NO_ERROR                     there is no error
- * @return    M4ERR_PARAMETER                at least one parameter is not properly set
- * @return    M4ERR_ALLOC                    a memory allocation has failed
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_create_fct)          (M4OSA_Context* pContext);
-
-/**
- ************************************************************************
- * @brief    destroy the instance of the reader
- * @note    after this call the context is invalid
- * @param    context:            (IN)    Context of the reader
- * @return    M4NO_ERROR                     there is no error
- * @return    M4ERR_PARAMETER                at least one parameter is not properly set
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_destroy_fct)         (M4OSA_Context context);
-
-
-/**
- ************************************************************************
- * @brief    open the reader and initializes its created instance
- * @note    this function, for the network reader, sends the DESCRIBE
- * @param    context:            (IN)    Context of the reader
- * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying the media to open
- * @return    M4NO_ERROR                     there is no error
- * @return    M4ERR_PARAMETER                the context is NULL
- * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_open_fct)    (M4OSA_Context context, M4OSA_Void* pFileDescriptor);
-
-
-/**
- ************************************************************************
- * @brief    close the reader
- * @note
- * @param    context:        (IN)    Context of the reader
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            the context is NULL
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR   (M4READER_close_fct)    (M4OSA_Context context);
-
-
-
-/**
- ************************************************************************
- * @brief    Get the next stream found in the media
- * @note
- * @param    context:        (IN)    Context of the reader
- * @param    pMediaFamily:    (OUT)    pointer to a user allocated M4READER_MediaFamily that will
- *                                     be filled with the media family of the found stream
- * @param    pStreamHandler:    (OUT)    pointer to a stream handler that will be allocated and
- *                                       filled with the found stream description
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4WAR_NO_MORE_STREAM    no more available stream in the media (all streams found)
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_getNextStream_fct)   (M4OSA_Context context,
-                                                     M4READER_MediaFamily *pMediaFamily,
-                                                     M4_StreamHandler **pStreamHandler);
-
-
-/**
- ************************************************************************
- * @brief    fill the access unit structure with initialization values
- * @note
- * @param    context:        (IN)     Context of the reader
- * @param    pStreamHandler:    (IN)     pointer to the stream handler to which the access unit
- *                                           will be associated
- * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by the caller)
- *                                           to initialize
- * @return    M4NO_ERROR                  there is no error
- * @return    M4ERR_BAD_CONTEXT         provided context is not a valid one
- * @return    M4ERR_PARAMETER             at least one parameter is not properly set
- * @return    M4ERR_ALLOC                 there is no more memory available
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_fillAuStruct_fct)    (M4OSA_Context context,
-                                                   M4_StreamHandler *pStreamHandler,
-                                                   M4_AccessUnit *pAccessUnit);
-
-/**
- ************************************************************************
- * @brief    starts the instance of the reader
- * @note    only needed for network until now...
- * @param    context:        (IN)    Context of the reader
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            the context is NULL
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_start_fct)   (M4OSA_Context context);
-
-/**
- ************************************************************************
- * @brief    stop reading
- * @note    only needed for network until now... (makes a pause)
- * @param    context:        (IN)    Context of the reader
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            the context is NULL
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_stop_fct)   (M4OSA_Context context);
-
-
-/**
- ************************************************************************
- * @brief    get an option value from the reader
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to retrieve a property value:
- *          -the duration of the longest stream of the media
- *          -the version number of the reader
- *
- * @param    context:        (IN)    Context of the reader
- * @param    optionId:        (IN)    indicates the option to get
- * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
- *                                          where option is stored
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_getOption_fct)       (M4OSA_Context context, M4OSA_OptionID optionId,
-                                                     M4OSA_DataOption pValue);
-
-
-/**
- ************************************************************************
- * @brief   set en option value of the readder
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to set a property value:
- *          - nothing for the moment
- *
- * @param    context:        (IN)    Context of the reader
- * @param    optionId:        (IN)    indicates the option to set
- * @param    pValue:            (IN)    pointer to structure or value (allocated by user) where
- *                                          option is stored
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
- ************************************************************************
-*/
-typedef M4OSA_ERR (M4READER_setOption_fct)       (M4OSA_Context context, M4OSA_OptionID optionId,
-                                                     M4OSA_DataOption pValue);
-
-
-/**
- ************************************************************************
- * @brief    jump into the stream at the specified time
- * @note
- * @param    context:        (IN)     Context of the reader
- * @param    pStreamHandler    (IN)     the stream handler of the stream to make jump
- * @param    pTime            (IN/OUT) IN:  the time to jump to (in ms)
- *                                     OUT: the time to which the stream really jumped
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_ALLOC                there is no more memory available
- * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
- ************************************************************************
-*/
-typedef M4OSA_ERR   (M4READER_jump_fct)     (M4OSA_Context context,
-                                                M4_StreamHandler *pStreamHandler,
-                                                M4OSA_Int32* pTime);
-
-
-/**
- ************************************************************************
- * @brief    reset the stream, that is seek it to beginning and make it ready to be read
- * @note
- * @param    context:        (IN)    Context of the reader
- * @param    pStreamHandler    (IN)    The stream handler of the stream to reset
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_ALLOC                there is no more memory available
- * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
- ************************************************************************
-*/
-typedef M4OSA_ERR   (M4READER_reset_fct)    (M4OSA_Context context,
-                                                M4_StreamHandler *pStreamHandler);
-
-
-/**
- ************************************************************************
- * @brief    get the time of the closest RAP access unit before the given time
- * @note
- * @param    context:        (IN)     Context of the reader
- * @param    pStreamHandler    (IN)     the stream handler of the stream to search
- * @param    pTime            (IN/OUT) IN:  the time to search from (in ms)
- *                                     OUT: the time (cts) of the preceding RAP AU.
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
- ************************************************************************
-*/
-typedef M4OSA_ERR   (M4READER_getPrevRapTime_fct) (M4OSA_Context context,
-                                                    M4_StreamHandler *pStreamHandler,
-                                                    M4OSA_Int32* pTime);
-
-
-/**
- ************************************************************************
- * structure    M4READER_GlobalInterface
- * @brief        This structure defines the generic media reader GLOBAL interface
- * @note        This structure stores the pointers to functions concerning
- *                creation and control of one reader type.
- *                The reader type is one of the M4READER_MediaType
- ************************************************************************
-*/
-typedef struct _M4READER_GlobalInterface
-/*****************************************/
-{
-    M4READER_create_fct*            m_pFctCreate;
-    M4READER_destroy_fct*           m_pFctDestroy;
-    M4READER_open_fct*              m_pFctOpen;
-    M4READER_close_fct*             m_pFctClose;
-    M4READER_getOption_fct*         m_pFctGetOption;
-    M4READER_setOption_fct*         m_pFctSetOption;
-    M4READER_getNextStream_fct*     m_pFctGetNextStream;
-    M4READER_fillAuStruct_fct*      m_pFctFillAuStruct;
-    M4READER_start_fct*             m_pFctStart;
-    M4READER_stop_fct*              m_pFctStop;
-    M4READER_jump_fct*              m_pFctJump;
-    M4READER_reset_fct*             m_pFctReset;
-    M4READER_getPrevRapTime_fct*    m_pFctGetPrevRapTime;
-
-} M4READER_GlobalInterface;
-
-
-/************* READER DATA Interface ************************************/
-
-
-
-/**
- ************************************************************************
- * @brief    Gets an access unit (AU) from the stream handler source.
- * @note    An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
- *
- * @param    context:        (IN)        Context of the reader
- * @param    pStreamHandler    (IN)        The stream handler of the stream to make jump
- * @param    pAccessUnit        (IN/OUT)   Pointer to an access unit to fill with read data
- *                                         (the au structure is allocated by the user, and must be
- *                                         initialized by calling M4READER_fillAuStruct_fct after
- *                                         creation)
- * @return    M4NO_ERROR                     there is no error
- * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
- * @return    M4ERR_PARAMETER                at least one parameter is not properly set
- * @returns    M4ERR_ALLOC                    memory allocation failed
- * @returns    M4ERR_BAD_STREAM_ID            at least one of the stream Id. does not exist.
- * @returns    M4WAR_NO_DATA_YET            there is no enough data on the stream for new
- *                                          access unit
- * @returns    M4WAR_NO_MORE_AU            there are no more access unit in the stream
- *                                          (end of stream)
- ************************************************************************
-*/
-typedef M4OSA_ERR   (M4READER_getNextAu_fct)(M4OSA_Context context,
-                                             M4_StreamHandler *pStreamHandler,
-                                             M4_AccessUnit *pAccessUnit);
-
-
-/**
- ************************************************************************
- * structure    M4READER_DataInterface
- * @brief        This structure defines the generic media reader DATA interface
- * @note        This structure stores the pointers to functions concerning
- *                data access for one reader type.(those functions are typically called from
- *                a decoder) The reader type is one of the M4READER_MediaType
- ************************************************************************
-*/
-typedef struct _M4READER_DataInterface
-{
-    M4READER_getNextAu_fct*   m_pFctGetNextAu;
-
-    /**
-    stores the context created by the M4READER_create_fct() function
-    so it is accessible without  decoder
-    */
-    M4OSA_Context m_readerContext;
-/*****************************************/
-} M4READER_DataInterface;
-/*****************************************/
-
-
-#endif /*__M4READER_COMMON_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4READER_Pcm.h b/libvideoeditor/vss/common/inc/M4READER_Pcm.h
deleted file mode 100755
index f0fc857..0000000
--- a/libvideoeditor/vss/common/inc/M4READER_Pcm.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file    M4READER_Pcm.h
- * @brief    Generic encapsulation of the core wav reader
- * @note    This file declares the generic shell interface retrieving function
- *            of the wav reader
-*************************************************************************
-*/
-#ifndef __M4READER_PCM_H__
-#define __M4READER_PCM_H__
-
-#include "M4READER_Common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
-*************************************************************************
-* @brief Retrieves the generic interfaces implemented by the reader
-*
-* @param pMediaType             : Pointer on a M4READER_MediaType (allocated by the caller)
-*                              that will be filled with the media type supported by this reader
-* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
-*                              implemented by this reader. The interface is a structure allocated
-*                              by the function and must be un-allocated by the caller.
-* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
-*                              implemented by this reader. The interface is a structure allocated
-*                              by the function and must be un-allocated by the caller.
-*
-* @returns : M4NO_ERROR     if OK
-*             ERR_ALLOC      if an allocation failed
-*            ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
-*************************************************************************
-*/
-M4OSA_ERR M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
-                                        M4READER_GlobalInterface **pRdrGlobalInterface,
-                                        M4READER_DataInterface **pRdrDataInterface);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4READER_PCM_H__*/
diff --git a/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h b/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h
deleted file mode 100755
index f50367c..0000000
--- a/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file         M4SYS_AccessUnit.h
- * @brief        Access unit manipulation
- * @note         This file defines the access unit structure,
- *               and declares functions to manipulate it.
- ************************************************************************
-*/
-
-#ifndef M4SYS_ACCESSUNIT_H
-#define M4SYS_ACCESSUNIT_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Time.h"
-#include "M4SYS_Stream.h"
-
-/** The attribute of a fragment*/
-typedef enum {
-  M4SYS_kFragAttrOk        = 01, /**< The fragment is correct, there is no error
-                                         (size cannot be 0)*/
-  M4SYS_kFragAttrCorrupted = 02, /**< The fragment is corrupted (there is at least a bit or byte
-                                        error somewhere in the fragment (size cannot be 0)*/
-  M4SYS_kFragAttrLost      = 03  /**< The fragment is lost, so the size must be 0.*/
-} M4SYS_FragAttr;
-
-
-/** A Fragment is a piece of access unit. It can be decoded without decoding the others*/
-typedef struct {
-  M4OSA_MemAddr8  fragAddress;   /**< The data pointer. All fragments of the same access unit
-                                        must be contiguous in memory*/
-  M4OSA_UInt32    size;          /**< The size of the fragment. It must be 0 if fragment is
-                                        flagged 'lost'*/
-  M4SYS_FragAttr  isCorrupted;   /**< The attribute of this fragment*/
-} M4SYS_Frag;
-
-/**< The attribute of an access unit*/
-typedef M4OSA_UInt8 M4SYS_AU_Attr;
-
-#define AU_Corrupted   0x01 /**< At least one fragment of the access unit is flagged corrupted.*/
-#define AU_P_Frame     0x02 /**< The access unit is a P_frame*/
-#define AU_RAP         0x04 /**< The access unit is a random access point.*/
-
-
-/** An access unit is the smallest piece of data with timing information.*/
-typedef struct {
-  M4SYS_StreamDescription*    stream ;
-  M4OSA_MemAddr32             dataAddress; /**< The data pointer. The size of this block
-                                            (allocated size) must be a 32-bits integer multiple*/
-  M4OSA_UInt32                size;        /**< The size in bytes of the dataAddress. The size may
-                                                 not match a 32-bits word boundary.*/
-  M4OSA_Time                  CTS;         /**< The Composition Time Stamp*/
-  M4OSA_Time                  DTS;         /**< The Decoded Time Stamp*/
-  M4SYS_AU_Attr               attribute;   /**< The attribute of the access unit*/
-  M4OSA_UInt8                 nbFrag;      /**< The number of fragments. It can be 0 if there is
-                                                no fragment.*/
-  M4SYS_Frag**                frag;        /**< An array of 'nbFrag' fragments. It stores the
-                                                fragments structure. The original definition
-                                              < of frag has been changed from M4SYS_Frag* frag[]
-                                                to M4SYS_Frag** frag since the support
-                                              < of such syntax is only a Microsoft extension of
-                                                the C compiler. */
-} M4SYS_AccessUnit;
-
-/* Error codes */
-#define M4ERR_AU_NO_MORE_FRAG      M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000001)
-#define M4ERR_AU_BUFFER_OVERFLOW   M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000002)
-#define M4ERR_AU_BAD_INDEX         M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000003)
-#define M4ERR_NOT_ENOUGH_FRAG      M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000004)
-
-
-
-#endif /*M4SYS_ACCESSUNIT_H*/
-
diff --git a/libvideoeditor/vss/common/inc/M4SYS_Stream.h b/libvideoeditor/vss/common/inc/M4SYS_Stream.h
deleted file mode 100755
index bab0ce7..0000000
--- a/libvideoeditor/vss/common/inc/M4SYS_Stream.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
-
- ************************************************************************
- * @file         M4SYS_Stream.h
- * @brief        Stream manipulation
- * @note         This file defines the stream structure.
- ************************************************************************
-*/
-
-#ifndef M4SYS_STREAM_H
-#define M4SYS_STREAM_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Time.h"
-
-typedef M4OSA_UInt32 M4SYS_StreamID;
-
-/** The streamType type provides a way to distinguish all streams (AAC, AMR, YUV420, MPEG-4 Video,
-     H263). Stream types can be sorted in 2 ways:
-@arg   Some of them are raw data, others are encoded
-@arg   Some of them are related to an audio media, a video media...
-@n So a specific naming convention has been designed to allow a quick parsing of the streamType
-    value to return the above categories. StreamType is an un-signed integer on 16 bits.
-@arg   The first byte (MSB) defines the codec type. It can be either Audio,Video, Picture,
-         Text or Scene.
-@arg   The second byte (LSB) defines the sub-codecs type (ie YUV420, PCM_16 bits, AMR...).
-        Moreover if this value is greater than 0x80 the stream is a raw stream, else the stream
-        is an encoded one
-@n   0x0000 is a forbidden value, it describes an unknown stream */
-
-typedef enum {
-   M4SYS_kUnknown       = 0x0000,
-   /* Stream type definition
-       0xYYZZ   : YY is the codec type (Audio, Video, Picture, Scene ...)
-                  ZZ is the sub-codec type (AAC, AMR , ...)
-                     if ZZ is greater than 0x80 it is a raw format*/
-
-   /* Audio ones   : Range from [0x0100-0x01FF]*/
-   M4SYS_kAudioUnknown  = 0x0100,
-   M4SYS_kAAC           = 0x0101,
-   M4SYS_kCELP          = 0x0102,
-   M4SYS_kAMR           = 0x0103,
-   M4SYS_kAMR_WB        = 0x0104,
-   M4SYS_kMP3           = 0x0105,
-   M4SYS_kMIDI          = 0x0106,
-   M4SYS_kWMA           = 0x0107,
-   M4SYS_kREALAUDIO     = 0x0108,
-   M4SYS_kEVRC            = 0x0109,
-   M4SYS_kPCM_16bitsS   = 0x0181, /* PCM 16 bits Signed */
-   M4SYS_kPCM_16bitsU   = 0x0182, /* PCM 16 bits Un-signed */
-   M4SYS_kPCM_8bitsU    = 0x0183, /* PCM  8 bits Un-signed */
-/* FixAA 2008/03/03 types: M4SYS_kPCM_16bitsS, M4SYS_kPCM_16bitsU and M4SYS_kPCM_8bitsU
-   are now only used by AudioMixer and ReaderAVI => An update is necessary in the future for use
-   type M4SYS_kPCM */
-   M4SYS_kXMF            = 0x0184,
-   M4SYS_kSMAF          = 0x0185,
-   M4SYS_kIMEL          = 0x0186,
-   M4SYS_kBBA            = 0x0187,
-   M4SYS_kBPC            = 0x0188,
-   M4SYS_kADPCM         = 0x0189,  /* ADPCM added */
-   M4SYS_kPCM           = 0x0190,  /* stream type added: PCM;  PR2569 fixAA */
-   M4SYS_kAudioAll        = 0x01FF,  /* all audio streams */
-
-   /* Video ones   : Range [0x0200-0x02FF]*/
-   M4SYS_kVideoUnknown  = 0x0200,
-   M4SYS_kMPEG_4        = 0x0201,
-   M4SYS_kH263          = 0x0202,
-   M4SYS_kH263pp        = 0x0203,
-   M4SYS_kH264          = 0x0204,
-   M4SYS_kREALVIDEO     = 0x0205,
-   M4SYS_kYUV420        = 0x0281,
-   M4SYS_kRGB32         = 0x0282,
-   M4SYS_kBGR32         = 0x0283,
-   M4SYS_kRGB24         = 0x0284,
-   M4SYS_kBGR24         = 0x0285,
-   M4SYS_kVideoAll        = 0x02FF,  /* all video streams */
-
-  /* Picture ones : Range [0x0300-0x03FF]*/
-   M4SYS_kPictureUnknown = 0x0300,
-   M4SYS_kJPEG           = 0x0301,
-   M4SYS_kGIF            = 0x0302,
-   M4SYS_kBMP            = 0x0383,
-   M4SYS_kStillAll         = 0x03FF,  /* all still picture streams */
-
-   /* Text ones    : Range [0x0400-0x04FF]*/
-   M4SYS_kTextUnknown  = 0x0400,
-   M4SYS_kTimedText    = 0x0401,
-   M4SYS_kUTF8         = 0x0481,
-   M4SYS_kUTF16        = 0x0482,
-   M4SYS_kUCS2         = 0x0483,
-   M4SYS_kTextAll       = 0x04FF,  /* all text streams */
-
-   /* Scene & Graphics ones   : Range [0x0500-0x05FF]*/
-   M4SYS_kSceneUnknown  = 0x0500,
-   M4SYS_kSMIL          = 0x0501,
-   M4SYS_kBIFS          = 0x0502,
-   M4SYS_kSceneAll        = 0x05FF,  /* all scene streams */
-
-   /* hinted ones   : Range [0x0600-0x06FF]*/
-   M4SYS_kHintedUnknown = 0x0600,
-   M4SYS_kRTP           = 0x0601,
-   M4SYS_kMPEG2_TS      = 0x0602,
-   M4SYS_kHintedAll        = 0x06FF,  /* all packetized streams */
-
-   /* MPEG-4 system ones : Range [0x0700-0x07FF]*/
-   M4SYS_kSysUnknown    = 0x0700,
-   M4SYS_kODS           = 0x0701,
-   M4SYS_kIPMP          = 0x0702,
-   M4SYS_kOCI           = 0x0703,
-   M4SYS_kSysAll        = 0x07FF /* all system streams*/
-} M4SYS_StreamType ;
-
-typedef struct {
-   M4SYS_StreamID     streamID ;
-   M4OSA_UInt32      value ;
-} M4SYS_StreamIDValue ;
-
-typedef struct {
-   M4SYS_StreamID    streamID ;
-   M4OSA_UInt32      size ;
-   M4OSA_MemAddr32   addr ;
-} M4SYS_StreamIDmemAddr ;
-
-/** This strucure defines a set of properties associated to a stream*/
-typedef struct {
-  M4SYS_StreamID   streamID;    /**< The ID of the stream. It must be unique for a media
-                                (ie in a MP4 file, two tracks can not have two times the same ID).
-                                 0 is forbidden.*/
-  M4SYS_StreamType streamType;    /**< The stream type of the stream*/
-  M4OSA_UInt8      profileLevel;  /**< The profile & level of a stream. It is related to the
-                                       stream type & the definition comes from the standard bodies
-                                       (i.e. MPEG-4 Video & MPEG-4 Audio). Some values are
-                                       pre-defined: 0xFE=userPrivate 0xFF=no Profile &
-                                       Level specified*/
-  M4OSA_UInt32     decoderSpecificInfoSize;  /**< The decoder configuration. These bytes are
-                                                   needed to initialise a decoder.*/
-  M4OSA_MemAddr32  decoderSpecificInfo; /**< The size (in bytes) of the decoder specific info.*/
-  M4OSA_UInt32     timeScale;     /**< The time scale of the stream. It means that all timing
-                                        duration of this stream are computed in this timescale
-                                        (ie timeScale = 8000, means there are 8000 ticks in
-                                        one second)*/
-  M4OSA_Time       duration;        /**< The stream duration of this stream. The time unit is the
-                                        time scale. The value can be set to M4SYS_UnknownTime if
-                                        the duration is not known.*/
-  M4OSA_Int32      averageBitrate;  /**< The average bitrate (in bit per second) of this stream.
-                                         The average bitrate is computed on the stream duration.
-                                         -1 value means either there is no average bitrate or no
-                                         average bitrate is provided.*/
-  M4OSA_Int32      maxBitrate;      /**< The maximum bitrate (in bit per second) of this stream.
-                                         The maximum bitrate is computed on a sliding window of 1
-                                         second. -1 value means either there is no max. bitrate or
-                                         no max. bitrate is provided.*/
-} M4SYS_StreamDescription;
-
-typedef enum {
-   M4SYS_kPreviousRAP      = 0x01 ,
-   M4SYS_kNextRAP          = 0x02 ,
-   M4SYS_kClosestRAP       = 0x03 ,
-   M4SYS_kNoRAPprevious    = 0x11 ,
-   M4SYS_kNoRAPnext        = 0x12 ,
-   M4SYS_kNoRAPclosest     = 0x13 ,
-   M4SYS_kBeginning        = 0x20
-} M4SYS_SeekAccessMode ;
-
-#endif /*M4SYS_STREAM_H*/
-
-
-
diff --git a/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h b/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h
deleted file mode 100755
index 7016b8d..0000000
--- a/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4TOOL_VersionInfo.h
- * @brief  defines a common version information structure
- * @note
- *
- ************************************************************************
-*/
-#ifndef __M4TOOL_VERSIONINFO_H__
-#define __M4TOOL_VERSIONINFO_H__
-
-#include "M4OSA_Types.h"
-
-/**
- * structure    M4_VersionInfo
- * @brief        This structure describes version of core component
- * @note        This structure is typically used to retrieve version information
- *                of a component via getOption function
- */
-typedef struct _M4_VersionInfo
-{
-    M4OSA_UInt32 m_major;        /*major version of the component*/
-    M4OSA_UInt32 m_minor;        /*minor version of the component*/
-    M4OSA_UInt32 m_revision;    /*revision version of the component*/
-
-    /* Structure size */
-    M4OSA_UInt32 m_structSize;
-
-} M4_VersionInfo;
-
-
-#endif /*__M4TOOL_VERSIONINFO_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h
deleted file mode 100755
index 90bfcb6..0000000
--- a/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4VD_EXTERNAL_INTERFACE_H__
-#define __M4VD_EXTERNAL_INTERFACE_H__
-
-#include "M4DECODER_Common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* ----- DSI bitstream parser ----- */
-
-/* This function is available to clients of the shell to allow them to analyse clips
-(useful for video editing) without having to instanciate a decoder, which can be useful precisely
-if HW decoders are a possibility. */
-
-M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(
-                      M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
-                      M4DECODER_MPEG4_DecoderConfigInfo* pDci,
-                      M4DECODER_VideoSize* pVideoSize);
-
-M4OSA_ERR getAVCProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
-                      M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel);
-
-M4OSA_ERR getH263ProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
-                      M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel);
-
-M4OSA_ERR getMPEG4ProfileAndLevel(M4OSA_UInt8 profileAndLevel,
-                      M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __M4VD_EXTERNAL_INTERFACE_H__ */
diff --git a/libvideoeditor/vss/common/inc/M4VD_Tools.h b/libvideoeditor/vss/common/inc/M4VD_Tools.h
deleted file mode 100755
index 3ca36ac..0000000
--- a/libvideoeditor/vss/common/inc/M4VD_Tools.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4VD_TOOLS_H__
-#define __M4VD_TOOLS_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "NXPSW_CompilerSwitches.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Types.h"
-/* ----- bitstream parser ----- */
-
-typedef struct
-{
-    M4OSA_UInt32 stream_byte;
-    M4OSA_UInt32 stream_index;
-    M4OSA_MemAddr8 in;
-
-} M4VS_Bitstream_ctxt;
-
-M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
-                                            M4OSA_UInt32 nb_bits);
-M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
-                                         M4OSA_MemAddr32 dest_bits,
-                                         M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __M4VD_TOOLS_H__ */
diff --git a/libvideoeditor/vss/common/inc/M4VFL_transition.h b/libvideoeditor/vss/common/inc/M4VFL_transition.h
deleted file mode 100755
index 77f76cb..0000000
--- a/libvideoeditor/vss/common/inc/M4VFL_transition.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /**
- ******************************************************************************
- * @file        M4TRAN_transition.h
- * @brief
- * @note
- ******************************************************************************
-*/
-
-#ifndef __M4VFL_TRANSITION_H__
-#define __M4VFL_TRANSITION_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-typedef unsigned char UInt8;
-typedef unsigned long UInt32;
-
-typedef    struct S_M4ViComImagePlane
-{
-    UInt32        u_width;            /* active width, in pixels */
-    UInt32        u_height;            /* active height, in lines */
-    UInt32        u_topleft;            /* index of 1st active pixel */
-    UInt32        u_stride;            /* line stride, in bytes */
-    UInt8        *pac_data;            /* buffer address */
-}    M4ViComImagePlane;
-
-typedef struct S_M4VFL_modifLumParam
-{
-    unsigned short lum_factor;
-    unsigned short copy_chroma;
-} M4VFL_ModifLumParam;
-
-#define     M4VIFI_OK                       0
-#define     M4VIFI_ILLEGAL_FRAME_HEIGHT     8
-#define     M4VIFI_ILLEGAL_FRAME_WIDTH      9
-
-unsigned char M4VFL_modifyLumaByStep(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
-                                         M4VFL_ModifLumParam *lum_param, void *user_data);
-
-unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
-                                         unsigned long lum_factor, void *user_data);
-
-/**
- *************************************************************************************************
- * M4OSA_ERR M4VIFI_ImageBlendingonYUV420 (void *pUserData,
- *                                                  M4VIFI_ImagePlane *pPlaneIn1,
- *                                                  M4VIFI_ImagePlane *pPlaneIn2,
- *                                                  M4VIFI_ImagePlane *pPlaneOut,
- *                                                  M4VIFI_UInt32 Progress)
- * @brief   Blends two YUV 4:2:0 Planar images.
- * @note    Blends YUV420 planar images,
- *          Map the value of progress from (0 - 1000) to (0 - 1024)
- *          Set the range of blendingfactor,
- *                  1. from 0 to (Progress << 1)            ;for Progress <= 512
- *                  2. from (( Progress - 512)<< 1) to 1024 ;otherwise
- *          Set the increment of blendingfactor for each element in the image row by the factor,
- *                  =  (Range-1) / (image width-1)  ;for width >= range
- *                  =  (Range) / (image width)      ;otherwise
- *          Loop on each(= i) row of output Y plane (steps of 2)
- *              Loop on each(= j) column of output Y plane (steps of 2)
- *                  Get four Y samples and one U & V sample from two input YUV4:2:0 images and
- *                  Compute four Y sample and one U & V sample for output YUV4:2:0 image
- *                      using the following,
- *                  Out(i,j) = blendingfactor(i,j) * In1(i,j)+ (l - blendingfactor(i,j)) * In2(i,j)
- *              end loop column
- *          end loop row.
- * @param   pUserData: (IN)  User Specific Parameter
- * @param   pPlaneIn1: (IN)  Pointer to an array of image plane structures maintained for Y, U
- *                            and V planes.
- * @param   pPlaneIn2: (IN)  Pointer to an array of image plane structures maintained for Y, U
- *                            and V planes.
- * @param   pPlaneOut: (OUT) Pointer to an array of image plane structures maintained for Y, U
- *                            and V planes.
- * @param   Progress:  (IN)  Progress value (varies between 0 and 1000)
- * @return  M4VIFI_OK: No error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
- ***********************************************************************************************/
-unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData, M4ViComImagePlane *pPlaneIn1,
-                                                M4ViComImagePlane *pPlaneIn2,
-                                                M4ViComImagePlane *pPlaneOut, UInt32 Progress);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif // __M4VFL_TRANSITION_H__
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_Clip.h b/libvideoeditor/vss/common/inc/M4VIFI_Clip.h
deleted file mode 100755
index 1f07616..0000000
--- a/libvideoeditor/vss/common/inc/M4VIFI_Clip.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file        M4VIFI_Clip.h
- * @brief        Global Table definition
- * @note        This file defines the Clipping and Division table address
- ******************************************************************************
-*/
-
-#ifndef    _M4VIFI_CLIP_H_
-#define    _M4VIFI_CLIP_H_
-
-/* Clipping matrix for RGB values */
-EXTERN CNST M4VIFI_UInt8    *M4VIFI_ClipTable_zero;
-/* Division table for (65535/x); x = 0 to 512 */
-EXTERN CNST M4VIFI_UInt16    *M4VIFI_DivTable_zero;
-
-#endif /* _M4VIFI_CLIP_H_ */
-
-/* End of file M4VIFI_Clip.h */
-
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_Defines.h b/libvideoeditor/vss/common/inc/M4VIFI_Defines.h
deleted file mode 100755
index e4591e5..0000000
--- a/libvideoeditor/vss/common/inc/M4VIFI_Defines.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file        M4VIFI_Defines.h
- * @brief        Macro Definition
- * @note        This file defines all the macro used in the filter library
- ******************************************************************************
-*/
-
-#ifndef _M4VIFI_DEFINES_H_
-#define _M4VIFI_DEFINES_H_
-
-/**
- *****************************************************************************
- *                    Macros used for color transform RGB565 to YUV
- *****************************************************************************
-*/
-#define CST_RGB_16_SIZE 2
-#define Y16(r, g, b) CLIP(  ( ( (80593 * r)+(77855 * g)+(30728 * b)) >> 15))
-#define U16(r, g, b) CLIP(128+ ( ( -(45483 * r)-(43936 * g)+(134771 * b)) >> 15 ))
-#define V16(r, g, b) CLIP(128+ ( ( (134771 * r)-(55532 * g)-(21917 * b)) >> 15  ))
-
-
-/**
- *****************************************************************************
- *    Macros used for color transform YUV to RGB
- *    B = 1.164(Y - 16)                  + 2.018(U - 128)
- *  G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128)
- *  R = 1.164(Y - 16) + 1.596(V - 128)
- *  Above Conversion Formula is implemented for fixed point operation
- *****************************************************************************
-*/
-
-#define CST_RGB_24_SIZE 3
-
-#ifdef __RGB_V1__
-#define DEMATRIX(Rx,Gx,Bx,Yx37,Ux,Vx) \
-    Rx = CLIP(((Yx37 + (Vx * 51) + 16) >> 5) - 223); \
-    Gx = CLIP(((Yx37 - ((Ux+(Vx<<1)) * 13) +16) >> 5) + 135); \
-    Bx = CLIP(((Yx37 + (Ux * 65) + 16) >> 5) - 277)
-#else
-#define DEMATRIX(Rx,Gx,Bx,Yx2568,Ux,Vx) \
-    Rx = CLIP(((Yx2568 +                 (Vx * 0x3343) + (M4VIFI_Int32)0xffe40800) >> 13)); \
-    Gx = CLIP(((Yx2568 - (Ux * 0x0c92) - (Vx * 0x1a1e) + (M4VIFI_Int32)0x00110180) >> 13)); \
-    Bx = CLIP(((Yx2568 + (Ux * 0x40cf)                    + (M4VIFI_Int32)0xffdd4200) >> 13));
-#endif /* __RGB_V1__ */
-
-/**
- *****************************************************************************
- *    Packing and Unpacking is different for little and big endian
- *  r, g, b, Rx, Gx, Bx are 8 bit color value
- *    a, data are 16 bit pixel value
- *****************************************************************************
- */
-
-/* Pack computations common for little endian and big endian modes */
-#define    PACK_BGR24(rgb_ptr,Rx,Gx,Bx) {rgb_ptr[0] = (M4VIFI_UInt8)Bx; rgb_ptr[1] =\
-                         (M4VIFI_UInt8)Gx; rgb_ptr[2] = (M4VIFI_UInt8)Rx;}
-#define    PACK_RGB24(rgb_ptr,Rx,Gx,Bx) {rgb_ptr[0] = (M4VIFI_UInt8)Rx; rgb_ptr[1] =\
-                         (M4VIFI_UInt8)Gx; rgb_ptr[2] = (M4VIFI_UInt8)Bx;}
-
-#ifdef BIG_ENDIAN
-#define    PACK_RGB565(a, Rx, Gx, Bx) (((Rx >> 3) << (11 + (a)))\
-                 | ((Gx >> 2) << (5 + (a))) | ((Bx >> 3) << (a)))
-#define    PACK_BGR565(a, Rx, Gx, Bx) (((Bx >> 3) << (11 + (a)))\
-                 | ((Gx >> 2) << (5 + (a))) | ((Rx >> 3) << (a)))
-#define GET_RGB565(r, g, b, data) {b = ((data) & 31); g =\
-                     ((data >> 5) & 63); r = ((data >> 11) & 31);}
-#define GET_BGR565(b, g, r, data) \
-    r = ((data) & 31); \
-    g = ((data >> 5) & 63); \
-    b = ((data >> 11) & 31 );
-#else /* LITTLE endian: 0x12345678 -> 78 56 34 12 */
-#define    PACK_RGB565(a, Rx, Gx, Bx) (((Bx >> 3) << (8 + (a))) \
-                  | (((Gx >> 2)&0x7) << (13 + (a))) | ((Gx >> 5) << (a)) | ((Rx >> 3) << (3 + a)))
-#define    PACK_BGR565(a, Rx, Gx, Bx) (((Rx >> 3) << (11 + (a))) \
-                  | ((Gx >> 2) << (5 + (a))) | ((Bx >> 3) << (a)))
-#define GET_RGB565(r, g, b, data) { b = (M4VIFI_UInt8)(((data) & 0x1F00) >> 8); g =\
-             (M4VIFI_UInt8)((((data) & 0x7) << 3) | (((data) & 0xE000) >> 13)); r =\
-             (M4VIFI_UInt8)(((data) & 0xF8) >> 3);}
-#define GET_BGR565(b, g, r, data) \
-    b = ((data) & 31); \
-    g = ((data >> 5) & 63); \
-    r = ((data >> 11) & 31 );
-#endif /* BIG_ENDIAN */
-
-
-#define CST_RGB_24_SIZE 3
-#define Y24(r,g,b) CLIP(( ( (19595 * r) + (38470 * g) + (9437 * b) ) >>16))
-#define U24(r,g,b) CLIP(128 + ( ( -(11059 * r) - (21709 * g) + (32768 * b)) >>16))
-#define V24(r,g,b) CLIP(128 + ( ( (32768 * r) - (27426 * g) - (5329 * b))  >>16))
-#define GET_RGB24(r,g,b,s,o) r = s[o]; g = s[o + 1]; b = s[o + 2];
-
-/**
- ***********************************************************************************
- *                    Macro for clipping using the clipping matrix for RGB values
- ***********************************************************************************
-*/
-/** Clip function ensures values with range of 0 and 255 */
-#define        CLIP(x)    *(M4VIFI_ClipTable_zero + (x))
-#define        CLIP_OVF        500
-#define     CLIP_LUT_SIZE     (256 + 2 * CLIP_OVF)
-/** Division table for RGB565 to HLS conversion */
-#define        DIVCLIP(x)    *(M4VIFI_DivTable_zero + (x))
-
-/**
- *****************************************************************************
- *                    Endianness (default configuration is Little Endian)
- *****************************************************************************
-*/
-#if (!defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN))
-/** Default endian setting */
-#define LITTLE_ENDIAN
-#endif
-
-/**
- *****************************************************************************
- *                    Other macros and define
- *****************************************************************************
-*/
-/** YUV plane index */
-#define PLANES    3
-#define YPlane    0
-#define UPlane    1
-#define VPlane    2
-
-/** Check for value is EVEN */
-#ifndef IS_EVEN
-#define IS_EVEN(a)    (!(a & 0x01))
-#endif
-
-/* Used for fixed point implementation */
-#ifndef MAX_SHORT
-#define MAX_SHORT    0x10000
-#endif
-
-#endif /* _M4VIFI_DEFINES_H_ */
-
-/* End of file M4VIFI_Defines.h */
-
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h b/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h
deleted file mode 100755
index 3d2fc9d..0000000
--- a/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file        M4VIFI_FiltersAPI.h
- * @brief        External API and Data definitions for the video filter library
- * @note        This file defines and declares data common to the video filter library:
- *                    -# data types
- *                    -# error codes
- *                    -# external API's
- *                    -# API level structure definition
- ******************************************************************************
-*/
-
-#ifndef _M4VIFI_FILTERSAPI_H_
-
-#define _M4VIFI_FILTERSAPI_H_
-
-#ifdef __cplusplus
-
-extern "C" {
-
-#endif /* __cplusplus */
-
-    /**
-     ***********************************************************
-     *                    Data types definition
-     ***********************************************************
-    */
-
-    typedef unsigned char M4VIFI_UInt8;
-    typedef char M4VIFI_Int8;
-    typedef unsigned short M4VIFI_UInt16;
-    typedef unsigned long M4VIFI_UInt32;
-    typedef short M4VIFI_Int16;
-    typedef long M4VIFI_Int32;
-    typedef float M4VIFI_Float;
-    typedef double M4VIFI_Double;
-    typedef unsigned char M4VIFI_ErrorCode;
-
-/**
- ***********************************************************
- *                    Error codes definition
- ***********************************************************
-*/
-#define M4VIFI_OK                        0
-#define M4VIFI_INVALID_PARAM            7
-#define M4VIFI_ILLEGAL_FRAME_HEIGHT        8
-#define M4VIFI_ILLEGAL_FRAME_WIDTH        9
-
-/**
- ***********************************************************
- *                    Other basic definitions
- ***********************************************************
-*/
-#define CNST    const
-#define EXTERN    extern
-
-#ifndef NULL
-#define NULL    0
-
-#endif
-#ifndef FALSE
-#define FALSE    0
-#define TRUE    !FALSE
-
-#endif
-
-/**
- ***********************************************************
- *                    Structures definition
- ***********************************************************
-*/
-
-/**
- ******************************************************************************
- * structure    M4VIFI_ImagePlane
- * @brief        Texture (YUV) planes structure
- * @note        This structure details the image planes for the output textures:
- *                sizes (in pixels) are luma plane sizes, the 3 pointers point
- *                to the Y, U and V buffers which store data in planar format.
- ******************************************************************************
-*/
-
-    typedef struct
-        {
-        M4VIFI_UInt32 u_width;   /**< Width of luma in pixel unit */
-        M4VIFI_UInt32 u_height;  /**< Height of luma in pixel unit */
-        M4VIFI_UInt32 u_topleft; /**< Pointer to first texture active pixel */
-        M4VIFI_UInt32 u_stride;  /**< Stride value */
-        M4VIFI_UInt8 *pac_data;  /**< Pointer to the data */
-        } M4VIFI_ImagePlane;
-
-/**
- ******************************************************************************
- * structure    M4VIFI_FramingData
- * @brief        Data necessary to add an overlay on an image
- * @note        This structure details the position and the data of the overlay
- ******************************************************************************
-*/
-    typedef struct
-        {
-        M4VIFI_UInt32
-            m_xPosStep; /**< X positioning of the overlay vs main picture.
-                                  X positioning is expressed in percentage vs the main
-                                   picture width.
-                                  m_xPosStep must be expressed by step of 1% and between
-                                  -50/+50%.
-                                  0% means overlay is centered vs main picture on
-                                   X abscissa. */
-        M4VIFI_UInt32
-            m_yPosStep; /**< Y positioning of the overlay vs main picture.
-                                  Y positioning is expressed in percentage vs the main
-                                   picture width.
-                                  m_xPosStep must be expressed by step of 1% and between
-                                   -50/+50%.
-                                  0% means overlay is centered vs main picture on
-                                   Y abscissa. */
-
-        M4VIFI_ImagePlane
-            *
-                m_imagePlane; /**< Pointer to the framing image with alpha channel */
-        } M4VIFI_FramingData;
-
-/**
- ******************************************************************************
- * structure    M4VIFI_HLSoffset
- * @brief        HLS offset structure
- * @note        This structure have the hue, saturation and lightness value
- *                for quality enhancement. Range of values neccessarily be
- *                hue = -360 to 360, sat = 0 to 100 and light = 0 t0 100
- ******************************************************************************
-*/
-    typedef struct
-        {
-        M4VIFI_Int16 hue;   /**< Hue offset */
-        M4VIFI_Int16 sat;   /**< Saturation offset */
-        M4VIFI_Int16 light; /**< Light offset */
-        } M4VIFI_HLSoffset;
-
-/**
- ******************************************************************************
- * structure    M4VIFI_Tranformation
- * @brief        Image Tranformation Structure
- * @note        Image Tranformation Request
- *                rotation : 1 -> +90deg Rotation
- *                          -1 -> -90deg Rotation
- *                           0 ->  No Rotation
- ******************************************************************************
-*/
-    typedef struct
-        {
-        M4VIFI_Int32 i32_rotation; /**< Rotation Flag        */
-        } M4VIFI_Tranformation;
-
-/**
- ******************************************************************************
- * structure    M4VIFI_pContext
- * @brief        New Structures
- * @note        -# Structure of M4VIFI_HLSoffset
- ******************************************************************************
-*/
-    typedef struct
-        {
-        M4VIFI_HLSoffset hlsOffset; /**< HLS offset structure */
-        } M4VIFI_pContext;
-
-    /*
-     *****************************************************
-     *                    External API functions
-     *****************************************************
-    */
-
-    /**< Effect filters */
-    M4VIFI_UInt8 M4VIFI_SepiaYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_GrayscaleYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_ContrastYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_NegativeYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_FlipYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_MirrorYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_ColorRYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_ColorGYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_ColorBYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_FramingRGB565toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_FramingYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_SetHueInYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_ColdYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    M4VIFI_UInt8 M4VIFI_WarmYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-/*                ADS Compiler                */
-
-/*        Generic ARM assembly functions        */
-#if defined ADS_ARM
-
-    /** Apply grayscale effect RGB565toRGB565 */
-
-    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear RGB888toRGB888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear RGB565toRGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** RGB565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_RGB565toYUV420AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** BGR565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_BGR565toYUV420AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV422 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_UYVYtoYUV420AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420 to RGB565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB565AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toRGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in RGB565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in BGR565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565AdsArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420AdsArm
-#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420AdsArm
-#define M4VIFI_UYVYtoYUV420                                    M4VIFI_UYVYtoYUV420AdsArm
-#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565AdsArm
-#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565AdsArm
-#define M4VIFI_ResizeBilinearYUV420toRGB565             \
-                           M4VIFI_ResizeBilinearYUV420toRGB565AdsArm
-
-#define M4VIFI_ResizeBilinearYUV420toBGR565             \
-                           M4VIFI_ResizeBilinearYUV420toBGR565AdsArm
-
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
-                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm
-
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft  \
-                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm
-
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
-                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm
-
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft  \
-                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm
-
-#define M4VIFI_SetHLSinRGB565                                M4VIFI_SetHLSinRGB565AdsArm
-#define M4VIFI_SetHLSinBGR565                                M4VIFI_SetHLSinBGR565AdsArm
-
-/*        ARM9E assembly functions        */
-#elif defined ADS_ARM9E
-
-    /** Apply grayscale effect RGB565toRGB565 */
-
-    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV888toYUV888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV565toYUV565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** RGB565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_RGB565toYUV420AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** BGR565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_BGR565toYUV420AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV422 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_UYVYtoYUV420AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420 to RGB565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB565AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toRGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in RGB565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in BGR565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565AdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize YUV420toYUV420 from QCIF to QVGA*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGAAdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /**Resize YUV420toRGB565 from QCIF to QVGA*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGAAdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation +90*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RRAdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation -90*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RLAdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-/** Resizes YUV420 Planar Image and stores in YUV420 Linear format with/without +or-90 rotation*/
-    M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV420LinearAdsArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420AdsArm9E
-#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420AdsArm9E
-#define M4VIFI_UYVYtoYUV420                                    M4VIFI_UYVYtoYUV420AdsArm9E
-#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565AdsArm9E
-#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565AdsArm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565 \
-                           M4VIFI_ResizeBilinearYUV420toRGB565AdsArm9E
-#define M4VIFI_ResizeBilinearYUV420toBGR565 \
-                           M4VIFI_ResizeBilinearYUV420toBGR565AdsArm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
-                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
-                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm9E
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
-                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm9E
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
-                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm9E
-#define M4VIFI_SetHLSinRGB565                            M4VIFI_SetHLSinRGB565AdsArm9E
-#define M4VIFI_SetHLSinBGR565                            M4VIFI_SetHLSinBGR565AdsArm9E
-#define M4VIFI_YUV420QCIFtoYUV420QVGA                    M4VIFI_YUV420QCIFtoYUV420QVGAAdsArm9E
-#define M4VIFI_YUV420QCIFtoRGB565QVGA                    M4VIFI_YUV420QCIFtoRGB565QVGAAdsArm9E
-#define M4VIFI_YUV420QCIFtoRGB565QVGA_RR                 M4VIFI_YUV420QCIFtoRGB565QVGA_RRAdsArm9E
-#define M4VIFI_YUV420QCIFtoRGB565QVGA_RL                 M4VIFI_YUV420QCIFtoRGB565QVGA_RLAdsArm9E
-#define M4VIFI_YUV420PlanartoYUV420Linear                M4VIFI_YUV420PlanartoYUV420LinearAdsArm9E
-/*                GCC Compiler                */
-/*        Generic ARM assembly functions        */
-
-#elif defined GCC_ARM
-
-    /** Apply grayscale effect RGB565toRGB565 */
-
-    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV888toYUV888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV565toYUV565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** RGB565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_RGB565toYUV420GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** BGR565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_BGR565toYUV420GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420 to RGB565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB565GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toRGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toBGR565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Modify HLS in RGB565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Modify HLS in BGR565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565GccArm(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420GccArm
-#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420GccArm
-#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565GccArm
-#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565GccArm
-#define M4VIFI_ResizeBilinearYUV420toRGB565 \
-                               M4VIFI_ResizeBilinearYUV420toRGB565GccArm
-#define M4VIFI_ResizeBilinearYUV420toBGR565 \
-                               M4VIFI_ResizeBilinearYUV420toBGR565GccArm
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
-                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
-                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
-                               M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
-                               M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm
-#define M4VIFI_SetHLSinRGB565                                M4VIFI_SetHLSinRGB565GccArm
-#define M4VIFI_SetHLSinBGR565                                M4VIFI_SetHLSinBGR565GccArm
-
-/*        ARM9E assembly functions        */
-#elif defined GCC_ARM9E
-
-    /** Apply grayscale effect RGB565toRGB565 */
-
-    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV888toYUV888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV565toYUV565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** RGB565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_RGB565toYUV420GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** BGR565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_BGR565toYUV420GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420 to RGB565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB565GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toRGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm9E(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in RGB565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in BGR565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565GccArm9E(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420GccArm9E
-#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420GccArm9E
-#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565GccArm9E
-#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565GccArm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565 \
-                                   M4VIFI_ResizeBilinearYUV420toRGB565GccArm9E
-#define M4VIFI_ResizeBilinearYUV420toBGR565 \
-                                   M4VIFI_ResizeBilinearYUV420toBGR565GccArm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
-                                   M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
-                                   M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm9E
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
-                                   M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm9E
-#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
-                                   M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm9E
-#define M4VIFI_SetHLSinBGR565                                M4VIFI_SetHLSinBGR565GccArm9E
-#define M4VIFI_SetHLSinRGB565                                M4VIFI_SetHLSinRGB565GccArm9E
-
-/* TI CCS assembly files */
-#elif defined TI411_ARM9E
-
-    /** Apply grayscale effect RGB565toRGB565 */
-
-    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV888toYUV888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV565toYUV565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** YUV420 (Planar) to RGB565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 (Planar) to Resized RGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 (Planar) to Resized RGB888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420(Planar) to Resized and Rotated (-90) RGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420(Planar) to Resized and Rotated (+90) RGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420(Planar) to Resized YUV420(Planar) */
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGA(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution */
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-/** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution with rotation(-90) */
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RL(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-/** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution with rotation(+90) */
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RR(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-#define M4VIFI_YUV420toRGB565                             M4VIFI_YUV420toRGB565Ti411Arm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565 \
-                                M4VIFI_ResizeBilinearYUV420toRGB565Ti411Arm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
-                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftTi411Arm9E
-#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
-                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightTi411Arm9E
-
-#define M4VIFI_YUV420QCIFtoYUV420QVGA       M4VIFI_YUV420QCIFtoYUV420QVGATi411Arm9E
-#define M4VIFI_YUV420QCIFtoRGB565QVGA       M4VIFI_YUV420QCIFtoRGB565QVGATi411Arm9E
-#define M4VIFI_YUV420QCIFtoRGB565QVGA_RL  M4VIFI_YUV420QCIFtoRGB565QVGA_RLTi411Arm9E
-#define M4VIFI_YUV420QCIFtoRGB565QVGA_RR  M4VIFI_YUV420QCIFtoRGB565QVGA_RRTi411Arm9E
-
-/*        ANSI C Functions        */
-#else
-
-    /** Apply grayscale effect RGB565toRGB565 */
-
-    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV888toYUV888 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV565toYUV565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-    /** RGB565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_RGB565toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** BRG565 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_BGR565toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** BRG888 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_BGR888toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
-    /** RGB888 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
-
-    /** YUV422 to YUV420 */
-    M4VIFI_UInt8 M4VIFI_UYVYtoYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-
-    /** YUV420 to RGB565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565RotatedLeft(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** YUV420 to BGR565 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR565RotatedRight(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** YUV420 to BGR24 */
-    M4VIFI_UInt8 M4VIFI_YUV420toBGR24(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** YUV420 to RGB24 */
-    M4VIFI_UInt8 M4VIFI_YUV420toRGB24(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** Resize Bilinear YUV420toYUV420 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /** Resize Bilinear YUV420toRGB565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB888(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight(
-        void *pUserData,
-            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in RGB565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /** Modify HLS in BGR565 */
-    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    /**Resize YUV420toYUV420 from QCIF to QVGA*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGA(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /**Resize YUV420toRGB565 from QCIF to QVGA*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation +90*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RR(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation -90*/
-    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RL(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-/** Resizes YUV420 Planar Image and stores in YUV420 Linear format with/without +or-90 rotation*/
-    M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV420Linear(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-
-/** Resizes YUV420 Planar Image and stores in YUV422 Interleaved format
-     with/without +or-90 rotation*/
-    M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV422Interleaved(void *pUserData,
-        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
-#endif
-
-    /** definition of the converter function types */
-
-    typedef M4VIFI_UInt8 M4VIFI_PlanConverterFunctionType(void
-        *pContext, M4VIFI_ImagePlane* in, M4VIFI_ImagePlane* out);
-
-    /** definition of the preprocessing function types */
-    typedef M4VIFI_UInt8 M4VIFI_PreprocessFunctionType(void
-        *pContext, M4VIFI_ImagePlane* in, M4VIFI_ImagePlane* out);
-
-    M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data,
-        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data,
-        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
-    M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data,
-        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
-#ifdef __cplusplus
-
-}
-
-#endif /* __cplusplus */
-
-#endif /* _M4VIFI_FILTERSAPI_H_ */
-
-/* End of file M4VIFI_FiltersAPI.h */
diff --git a/libvideoeditor/vss/common/inc/M4VPP_API.h b/libvideoeditor/vss/common/inc/M4VPP_API.h
deleted file mode 100755
index 965ca22..0000000
--- a/libvideoeditor/vss/common/inc/M4VPP_API.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VPP_API.h
- * @brief    Video preprocessing API public functions prototypes.
- * @note
- ******************************************************************************
-*/
-
-#ifndef M4VPP_API_H
-#define M4VPP_API_H
-
-#include "M4OSA_Types.h"            /**< Include for common OSAL types */
-#include "M4OSA_Error.h"            /**< Include for common OSAL errors */
-
-/**
- *    Include Video filters interface definition (for the M4VIFI_ImagePlane type) */
-#include "M4VIFI_FiltersAPI.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/**
- ******************************************************************************
- * Public type of the Video Preprocessing execution context
- ******************************************************************************
-*/
-typedef M4OSA_Void*    M4VPP_Context;
-
-typedef enum
-{
-    M4VPP_kIYUV420=0,    /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
-    M4VPP_kIYUV422,        /**< YUV422 planar */
-    M4VPP_kIYUYV,        /**< YUV422 interlaced, luma first */
-    M4VPP_kIUYVY,        /**< YUV422 interlaced, chroma first */
-    M4VPP_kIJPEG,        /**< JPEG compressed frames */
-    M4VPP_kIRGB444,        /**< RGB 12 bits 4:4:4 */
-    M4VPP_kIRGB555,        /**< RGB 15 bits 5:5:5 */
-    M4VPP_kIRGB565,        /**< RGB 16 bits 5:6:5 */
-    M4VPP_kIRGB24,        /**< RGB 24 bits 8:8:8 */
-    M4VPP_kIRGB32,        /**< RGB 32 bits  */
-    M4VPP_kIBGR444,        /**< BGR 12 bits 4:4:4 */
-    M4VPP_kIBGR555,        /**< BGR 15 bits 5:5:5 */
-    M4VPP_kIBGR565,        /**< BGR 16 bits 5:6:5 */
-    M4VPP_kIBGR24,        /**< BGR 24 bits 8:8:8 */
-    M4VPP_kIBGR32        /**< BGR 32 bits  */
-} M4VPP_InputVideoFormat;
-
-
-/**
- ******************************************************************************
- * @brief    Prototype of the main video preprocessing function
- * @note    Preprocess one frame
- * @param    pContext:    (IN) Execution context of the VPP.
- * @param    pPlaneIn:    (INOUT)    Input Image
- * @param    pPlaneOut:    (INOUT)    Output Image
- ******************************************************************************
-*/
-typedef M4OSA_ERR (M4VPP_apply_fct) (M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
-                                     M4VIFI_ImagePlane* pPlaneOut);
-
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VPP_initVideoPreprocessing(M4VPP_Context* pContext)
- * @brief    This function allocates a new execution context for the Video Preprocessing component.
- * @note
- * @param    pContext:    (OUT) Execution context allocated by the function.
- * @return    M4NO_ERROR: there is no error.
- * @return    M4ERR_ALLOC: there is no more available memory.
- * @return    M4ERR_PARAMETER: pContext is NULL (debug only).
- ******************************************************************************
-*/
-M4OSA_ERR M4VPP_initVideoPreprocessing(M4VPP_Context* pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VPP_applyVideoPreprocessing(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
- *                                           M4VIFI_ImagePlane* pPlaneOut)
- * @brief    Preprocess one frame.
- * @note
- * @param    pContext:    (IN) Execution context.
- * @param    pPlaneIn:    (INOUT)    Input Image
- * @param    pPlaneOut:    (INOUT)    Output Image
- * @return    M4NO_ERROR: there is no error.
- * @return    M4ERR_PARAMETER: pContext or pPlaneIn or pPlaneOut is NULL (debug only).
- * @return    M4ERR_STATE: Video Preprocessing is not in an appropriate state for this function
- *                           to be called
- ******************************************************************************
-*/
-M4OSA_ERR M4VPP_applyVideoPreprocessing(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
-                                         M4VIFI_ImagePlane* pPlaneOut);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VPP_cleanUpVideoPreprocessing(M4VPP_Context pContext)
- * @brief    This method frees the execution context for the Video Preprocessing component.
- *            Any further usage of the context will lead to unpredictable result.
- * @note
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error.
- * @return    M4ERR_PARAMETER: pContext is NULL (debug only).
- ******************************************************************************
-*/
-M4OSA_ERR M4VPP_cleanUpVideoPreprocessing(M4VPP_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VPP_setVideoPreprocessingMode(M4VPP_Context pContext, M4VES_InputVideoFormat format)
- * @brief    This method apply the video preprocessing to the input plane. Result is put into the
- *           output plan.
- * @param    pContext:    (IN) Execution context.
- * @param    format  :    (IN) Format of input plane (rgb, yuv, ...)
- * @return    M4NO_ERROR: there is no error
- ******************************************************************************
-*/
-M4OSA_ERR M4VPP_setVideoPreprocessingMode(M4VPP_Context pContext, M4VPP_InputVideoFormat format);
-
-/**
- ******************************************************************************
- * @brief    Definition of the errors specific to this module.
- ******************************************************************************
-*/
-
-/**< Input and output planes have incompatible properties */
-#define M4VPP_ERR_IMCOMPATIBLE_IN_AND_OUT_PLANES    M4OSA_ERR_CREATE( M4_ERR,\
-     M4PREPROCESS_VIDEO, 0x000001);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* M4VPP_API_H */
-
diff --git a/libvideoeditor/vss/common/inc/M4WRITER_common.h b/libvideoeditor/vss/common/inc/M4WRITER_common.h
deleted file mode 100755
index abb7b86..0000000
--- a/libvideoeditor/vss/common/inc/M4WRITER_common.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /**
- ******************************************************************************
- * @file    M4WRITER_common.h
- * @brief    VES writers shell interface.
- * @note    This file defines the types internally used by the VES to abstract writers
- ******************************************************************************
-*/
-#ifndef __M4WRITER_COMMON_H__
-#define __M4WRITER_COMMON_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "M4OSA_Types.h"
-#include "M4OSA_FileWriter.h"   /* for M4OSA_FileWriterPointer */
-#include "M4OSA_FileReader.h"   /* for M4OSA_FileWriterPointer */
-#include "M4OSA_OptionID.h"     /* for M4OSA_OPTION_ID_CREATE() */
-#include "M4OSA_CoreID.h"       /* for M4WRITER_COMMON */
-
-#include "M4SYS_Stream.h"       /* for M4SYS_StreamID */
-#include "M4SYS_AccessUnit.h"   /* for M4SYS_AccessUnit */
-
-/**
- ******************************************************************************
- * MP4W Errors & Warnings definition
- ******************************************************************************
-*/
-#define M4WAR_WRITER_STOP_REQ        M4OSA_ERR_CREATE(M4_WAR, M4WRITER_COMMON ,0x000001)
-
-/**
- ******************************************************************************
- * enum        M4WRITER_OutputFileType
- * @brief    This enum defines the avalaible output file format.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4WRITER_kUnknown=-1,
-    M4WRITER_k3GPP=0,            /**< 3GPP compliant file */
-    M4WRITER_kAVI=1,            /**< AVI file */
-    M4WRITER_kAMR=2,            /**< AMR file */
-    M4WRITER_kNETWORK3GPP=3,    /**< 3GPP via TCP */
-    M4WRITER_kPCM=4,            /**< PCM file */
-    M4WRITER_kJPEG=5,            /**< JPEG EXIF writer */
-    M4WRITER_kMP3=6,            /**< MP3 writer */
-
-    M4WRITER_kType_NB  /* number of writers, keep it as last enum entry */
-
-} M4WRITER_OutputFileType;
-
-/**
- ******************************************************************************
- * enum    M4WRITER_OptionID
- * @brief    This enums defines all avalaible options. All the reuturned values are in
- *           M4OSA_UInt32 type.
- ******************************************************************************
-*/
-typedef enum {
-    M4WRITER_kMaxAUSize        = M4OSA_OPTION_ID_CREATE (M4_READ|M4_WRITE, M4WRITER_COMMON, 0x01),
-    M4WRITER_kMaxChunckSize    = M4OSA_OPTION_ID_CREATE (M4_READ|M4_WRITE, M4WRITER_COMMON, 0x02),
-    M4WRITER_kFileSize          = M4OSA_OPTION_ID_CREATE (M4_READ            , \
-        M4WRITER_COMMON, 0x03),  /**< File size if the process was ended when we call the method */
-    M4WRITER_kFileSizeAudioEstimated= M4OSA_OPTION_ID_CREATE (M4_READ    ,\
-         M4WRITER_COMMON, 0x04),    /**< File size if the process was ended when we call the
-                                     method, estimated size for audio */
-    M4WRITER_kEmbeddedString  = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x05),    /**< String embedded at the end of the file(SW - VES) */
-    M4WRITER_kEmbeddedVersion = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x06),    /**< Version embedded at the end of the file */
-    M4WRITER_kIntegrationTag  = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x07),    /**< String embedded at the end of the file (char[60]
-                                         for integration purpose) */
-    M4WRITER_kMaxFileSize      = M4OSA_OPTION_ID_CREATE (M4_WRITE        , \
-        M4WRITER_COMMON, 0x08),    /**< Maximum file size limitation */
-    M4WRITER_kMaxFileDuration = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x09),    /**< Maximum file duration limitation */
-    M4WRITER_kSetFtypBox      = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x0A),    /**< Set 'ftyp' atom */
-    M4WRITER_kMetaData          = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x0B),    /**< Additionnal information to set in the file */
-    M4WRITER_kDSI          = M4OSA_OPTION_ID_CREATE (M4_WRITE        , \
-        M4WRITER_COMMON, 0x0C),    /**< To set DSI of the file (Decoder specifc info) */
-    M4WRITER_kJpegReserveFPData     = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
-         M4WRITER_COMMON, 0x0D),    /**< Reserve some space in the file for JPEG fast
-                                        processing data */
-    M4WRITER_kJpegSetFPData     = M4OSA_OPTION_ID_CREATE (M4_WRITE        , \
-        M4WRITER_COMMON, 0x0E),    /**< Write Fast Processing Data in the file*/
-    /* + CRLV6775 -H.264 trimming */
-    M4WRITER_kMUL_PPS_SPS       = M4OSA_OPTION_ID_CREATE (M4_WRITE        , M4WRITER_COMMON, 0x0F)
-    /* - CRLV6775 -H.264 trimming */
-} M4WRITER_OptionID;
-
-
-/**
- ******************************************************************************
- * struct    M4WRITER_Header
- * @brief    This structure defines the buffer where an header is put.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_MemAddr8    pBuf;        /**< Buffer for the header */
-    M4OSA_UInt32    Size;        /**< Size of the data */
-} M4WRITER_Header;
-
-
-/**
- ******************************************************************************
- * struct    M4WRITER_StreamVideoInfos
- * @brief    This structure defines the specific video stream infos, extension to
- *           M4SYS_StreamDescription.
- ******************************************************************************
-*/
-typedef struct {
-    M4OSA_UInt32    height;                /**< Frame height */
-    M4OSA_UInt32    width;                /**< Frame Width */
-    M4OSA_Double    fps;                /**< Targetted framerate of the video */
-    M4WRITER_Header    Header;                /**< Sequence header of the video stream,
-                                        member set to NULL if no header present */
-} M4WRITER_StreamVideoInfos;
-
-
-/**
- ******************************************************************************
- * struct    M4WRITER_StreamAudioInfos
- * @brief    This structure defines the specific audio stream infos, extension to
-             M4SYS_StreamDescription.
- ******************************************************************************
-*/
-typedef struct {
-    M4OSA_UInt32    nbSamplesPerSec;    /**< Number of Samples per second */
-    M4OSA_UInt16    nbBitsPerSample;    /**< Number of Bits in 1 sample */
-    M4OSA_UInt16    nbChannels;            /**< Number of channels */
-    M4WRITER_Header    Header;                /**< Decoder Specific Info of the audiostream,
-                                             member set to NULL if no DSI present */
-} M4WRITER_StreamAudioInfos;
-
-
-/**
- ******************************************************************************
- * enum        M4WRITER_Orientation
- * @brief    This enum defines the possible orientation of a frame as described
- *            in the EXIF standard.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4WRITER_OrientationUnknown = 0,
-    M4WRITER_OrientationTopLeft,
-    M4WRITER_OrientationTopRight,
-    M4WRITER_OrientationBottomRight,
-    M4WRITER_OrientationBottomLeft,
-    M4WRITER_OrientationLeftTop,
-    M4WRITER_OrientationRightTop,
-    M4WRITER_OrientationRightBottom,
-    M4WRITER_OrientationLeftBottom
-}M4WRITER_Orientation ;
-
-/**
- ******************************************************************************
- * struct    M4WRITER_MetaData
- * @brief    This structure defines all the meta data to store in the encoded file.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Char*                Description ;
-    M4OSA_Char*                PhoneManufacturer ;
-    M4OSA_Char*                PhoneModel ;
-    M4OSA_Char*                Artist ;
-    M4OSA_Char*                Copyright ;
-    M4OSA_Char*                Software ;
-    M4OSA_Char*                CreationDate;
-    M4WRITER_Orientation    Orientation ;
-
-    M4OSA_UInt32            Width ;
-    M4OSA_UInt32            Height ;
-
-    M4OSA_UInt32            ThumbnailWidth ;
-    M4OSA_UInt32            ThumbnailHeight ;
-    M4OSA_Bool                ThumbnailPresence ;
-}M4WRITER_MetaData;
-
-
-typedef void* M4WRITER_Context;
-
-typedef M4OSA_ERR (M4WRITER_openWrite)        (M4WRITER_Context* hContext,\
-                                             void* outputFileDescriptor,\
-                                             M4OSA_FileWriterPointer* pFileWriterPointer,\
-                                             void* tempFileDescriptor, \
-                                             M4OSA_FileReadPointer* pFileReaderPointer);
-typedef M4OSA_ERR (M4WRITER_addStream)        (M4WRITER_Context  pContext,\
-                                            M4SYS_StreamDescription*streamDescription);
-typedef M4OSA_ERR (M4WRITER_startWriting)    (M4WRITER_Context  pContext);
-typedef M4OSA_ERR (M4WRITER_closeWrite)        (M4WRITER_Context  pContext);
-typedef M4OSA_ERR (M4WRITER_setOption)        (M4WRITER_Context  pContext, \
-                                            M4OSA_UInt32 optionID, \
-                                            M4OSA_DataOption optionValue);
-typedef M4OSA_ERR (M4WRITER_getOption)        (M4WRITER_Context  pContext, \
-                                            M4OSA_UInt32 optionID, \
-                                            M4OSA_DataOption optionValue);
-
-
-/**
- ******************************************************************************
- * struct    M4WRITER_GlobalInterface
- * @brief    Defines all the functions required for a writer shell.
- ******************************************************************************
-*/
-typedef struct _M4WRITER_GlobalInterface
-{
-    M4WRITER_openWrite*             pFctOpen;
-    M4WRITER_addStream*                pFctAddStream;
-    M4WRITER_startWriting*          pFctStartWriting;
-    M4WRITER_closeWrite*            pFctCloseWrite;
-    M4WRITER_setOption*                pFctSetOption;
-    M4WRITER_getOption*                pFctGetOption;
-} M4WRITER_GlobalInterface;
-
-typedef M4OSA_ERR  M4WRITER_startAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,\
-                                     M4SYS_AccessUnit* pAU);
-typedef M4OSA_ERR  M4WRITER_processAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,\
-                                     M4SYS_AccessUnit* pAU);
-
-/**
- ******************************************************************************
- * struct    M4WRITER_DataInterface
- * @brief    Defines all the functions required to write data with a writer shell.
- ******************************************************************************
-*/
-typedef struct _M4WRITER_DataInterface
-{
-    M4WRITER_startAU*    pStartAU;
-    M4WRITER_processAU* pProcessAU;
-
-    M4WRITER_Context    pWriterContext;
-
-} M4WRITER_DataInterface;
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__M4WRITER_COMMON_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4_BitStreamParser.h b/libvideoeditor/vss/common/inc/M4_BitStreamParser.h
deleted file mode 100755
index c875458..0000000
--- a/libvideoeditor/vss/common/inc/M4_BitStreamParser.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4_BitStreamParser.h
- * @brief  MPEG-4 File Format bit stream utility
- * @note   This file contains utility functions used to parse MPEG specific
- *         data structures.
- ************************************************************************
-*/
-#ifndef __M4_BITSTREAMPARSER_H__
-#define __M4_BITSTREAMPARSER_H__
-
-#include "M4OSA_Types.h"
-
-/**
-* M4_BitStreamParser_Init.
-*
-* Allocates the context and initializes internal data
-*
-* @param pContext   : A pointer to the context internally used by the package - ALLOCATED BY THE
-*                    FUNCTION (M4OSA_NULL if allocation fails)
-* @param bitStream  : A pointer to the bitstream - must be 32 bits as access are 32 bits
-* @param size        : The size of the bitstream in bytes
-*
-*/
-void M4_BitStreamParser_Init(void** pContext, void* pBitStream, M4OSA_Int32 size);
-
-/**
- ************************************************************************
- * @brief    Clean up context
- * @param    pContext    (IN/OUT)  M4_BitStreamParser context.
- ************************************************************************
-*/
-void M4_BitStreamParser_CleanUp(void* pContext);
-
-/**
- ************************************************************************
- * @brief    Read the next <length> bits in the bitstream.
- * @note    The function does not update the bitstream pointer.
- * @param    pContext    (IN/OUT) M4_BitStreamParser context.
- * @param    length        (IN) The number of bits to extract from the bitstream
- * @return    the read bits
- ************************************************************************
-*/
-M4OSA_UInt32 M4_BitStreamParser_ShowBits(void* pContext, M4OSA_Int32 length);
-
-/**
- ************************************************************************
- * @brief    Increment the bitstream pointer of <length> bits.
- * @param    pContext    (IN/OUT) M4_BitStreamParser context.
- * @param    length        (IN) The number of bit to shift the bitstream
- ************************************************************************
-*/
-void M4_BitStreamParser_FlushBits(void* pContext, M4OSA_Int32 length);
-
-/**
- ************************************************************************
- * @brief    Get a pointer to the current byte pointed by the bitstream pointer.
- * It does not update the bitstream pointer
- *
- * @param pContext   : A pointer to the context internally used by the package
- * @param length        : The number of bit to extract from the bitstream
- *
- * @returns the read bits
-*/
-M4OSA_UInt32 M4_BitStreamParser_GetBits(void* pContext,M4OSA_Int32 bitPos, M4OSA_Int32 length);
-
-/**
-* M4_BitStreamParser_Restart resets the bitstream indexes.
-*
-* @param pContext   : A pointer to the context internally used by the package
-*
-*/
-void M4_BitStreamParser_Restart(void* pContext);
-
-/**
- ************************************************************************
- * @brief    Get a pointer to the current byte pointed by the bitstream pointer.
- * @returns pointer to the current location in the bitstream
- * @note    It should be used carefully as the pointer is in the bitstream itself
- *            and no copy is made.
- * @param    pContext    (IN/OUT)  M4_BitStreamParser context.
-*/
-M4OSA_UInt8*  M4_BitStreamParser_GetCurrentbitStreamPointer(void* pContext);
-
-/**
-* M4_BitStreamParser_GetSize gets the size of the bitstream in bytes
-*
-* @param pContext   : A pointer to the context internally used by the package
-*
-* @returns the size of the bitstream in bytes
-*/
-M4OSA_Int32 M4_BitStreamParser_GetSize(void* pContext);
-
-void M4_MPEG4BitStreamParser_Init(void** pContext, void* pBitStream, M4OSA_Int32 size);
-
-/**
-* getMpegLengthFromInteger returns a decoded size value from an encoded one (SDL)
-*
-* @param pContext   : A pointer to the context internally used by the package
-* @param val : encoded value
-*
-* @returns size in a human readable form
-*/
-
-M4OSA_Int32 M4_MPEG4BitStreamParser_GetMpegLengthFromInteger(void* pContext, M4OSA_UInt32 val);
-
-
-/**
- ************************************************************************
- * @brief    Decode an MPEG4 Systems descriptor size from an encoded SDL size data.
- * @note    The value is read from the current bitstream location.
- * @param    pContext    (IN/OUT)  M4_BitStreamParser context.
- * @return    Size in a human readable form
- ************************************************************************
-*/
-M4OSA_Int32 M4_MPEG4BitStreamParser_GetMpegLengthFromStream(void* pContext);
-
-#endif /*__M4_BITSTREAMPARSER_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4_Common.h b/libvideoeditor/vss/common/inc/M4_Common.h
deleted file mode 100755
index 760a7da..0000000
--- a/libvideoeditor/vss/common/inc/M4_Common.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file   M4_Common.h
- * @brief  Common data structure between shells
- * @note
-*************************************************************************
-*/
-#ifndef __M4_COMMON_H__
-#define __M4_COMMON_H__
-
-#include "M4OSA_Types.h"
-
-/**
- ************************************************************************
- * structure    _parameterSet
- * @brief        This structure defines the structure of parameters for the avc
- *               decoder specific info
- * @note
- ************************************************************************
-*/
-typedef struct _parameterSet
-{
-    M4OSA_UInt16 m_length;                /* Number of items*/
-    M4OSA_UInt8* m_pParameterSetUnit;   /* Array of items*/
-} ParameterSet ;
-
-/**
- ************************************************************************
- * structure    _avcSpecificInfo
- * @brief        This structure defines the structure of specific info for the avc decoder
- * @note
- ************************************************************************
-*/
-typedef struct _avcSpecificInfo
-{
-    M4OSA_UInt8        m_nalUnitLength;                /* length in bytes of the NALUnitLength
-                                                            field in a AVC sample */
-    M4OSA_UInt8        m_numOfSequenceParameterSets;   /* Number of sequence parameter sets*/
-    M4OSA_UInt8        m_numOfPictureParameterSets;    /* Number of picture parameter sets*/
-    ParameterSet    *m_pSequenceParameterSet;        /* Sequence parameter sets array*/
-    ParameterSet    *m_pPictureParameterSet;        /* Picture parameter sets array*/
-} AvcSpecificInfo ;
-
-/**
- ************************************************************************
- * structure    M4_SynthesisAudioInfo
- * @brief        This structure contains specific pointers used for synthesis audio format
- ************************************************************************
-*/
-typedef struct _synthesisAudioInfo
-{
-    M4OSA_Void*        m_pInputBuf;
-    M4OSA_Void*        m_pInputInfo;
-    M4OSA_UInt16    m_uiNbSubFramePerStep;
-    M4OSA_UInt32    m_uiUsedBytes;
-} M4_SynthesisAudioInfo;
-
-
-/*
- ************************************************************************
- * enum     M4_AACDownsamplingMode
- * @brief   This enum states modes for Down sampling
- ************************************************************************
-*/
-typedef enum
-{
-    AAC_kDS_OFF    = 0,        /**< No Down sampling */
-    AAC_kDS_BY_2   = 1,        /**< Down sampling by 2
-                                 Profile = AAC :
-                                            output sampling rate = aac_samp_freq/2
-                                 Profile = HE_AAC and input is AAC:
-                                            Output sampling rate = aac_samp_freq.(No downsamping).
-                                 Profile = HE_AAC and input is HE_AAC:
-                                            Output sampling rate = aac_samp_freq (Downsampling
-                                            occurs in SBR tool).
-                                 case profile = HE_AAC_v2 :
-                                            Not Supported */
-    AAC_kDS_BY_3   = 2,        /**< Down sampling by 3  - only for AAC profile */
-    AAC_kDS_BY_4   = 3,        /**< Down sampling by 4  - only for AAC profile */
-    AAC_kDS_BY_8   = 4        /**< Down sampling by 8  - only for AAC profile */
-
-} M4_AACDownsamplingMode;
-
-
-/*
- ************************************************************************
- * enum     M4_AACOutputMode
- * @brief   This enum defines the output mode
- ************************************************************************
-*/
-typedef enum
-{
-    AAC_kMono      = 0,    /**< Output is Mono  */
-    AAC_kStereo    = 1     /**< Output is Stereo */
-} M4_AACOutputMode;
-
-
-/*
- ************************************************************************
- * enum     M4_AACDecProfile
- * @brief   This enum defines the AAC decoder profile
- ************************************************************************
-*/
-typedef enum
-{
-    AAC_kAAC       = 0,        /**< AAC profile (only AAC LC object are supported) */
-    AAC_kHE_AAC    = 1,        /**< HE AAC or AAC+ profile (SBR in LP Mode)  */
-    AAC_kHE_AAC_v2 = 2        /**< HE AAC v2 or Enhanced AAC+ profile (SBR Tool in HQ Mode) */
-} M4_AACDecProfile;
-
-
-/**
- ************************************************************************
- * structure    M4_AacDecoderConfig
- * @brief        This structure defines specific settings according to
- *                the user requirements
- ************************************************************************
-*/
-typedef struct
-{
-    M4_AACDecProfile        m_AACDecoderProfile;
-    M4_AACDownsamplingMode    m_DownSamplingMode;
-    M4_AACOutputMode        m_OutputMode;
-
-} M4_AacDecoderConfig;
-
-
-/**
- ************************************************************************
- * structure M4READER_AudioSbrUserdata
- * @brief    This structure defines the user's data needed to decode the
- *            AACplus stream
- * @note    The field m_pFirstAU is used in case of local files    and
- *            the field m_bIsSbrEnabled is used in streaming case.
- ************************************************************************
-*/
-typedef struct
-{
-  M4OSA_Void*            m_pFirstAU;                /**< The first AU from where SBR data are
-                                                         extracted (local file case)*/
-  M4OSA_Bool            m_bIsSbrEnabled;        /**< A boolean that indicates if the stream is
-                                                    AACplus (streaming case)*/
-  M4_AacDecoderConfig*    m_pAacDecoderUserConfig;/**< Decoder specific user setting */
-
-} M4READER_AudioSbrUserdata;
-
-#endif /* __M4_COMMON_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4_Utils.h b/libvideoeditor/vss/common/inc/M4_Utils.h
deleted file mode 100755
index a1e0829..0000000
--- a/libvideoeditor/vss/common/inc/M4_Utils.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file    M4_Utils.h
- * @brief    Utilities
- * @note    This file defines utility macros
-*************************************************************************
-*/
-#ifndef __M4_UTILS_H__
-#define __M4_UTILS_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*    M4_MediaTime definition
-    This type is used internally by some shell components */
-#include "M4OSA_Types.h"
-typedef M4OSA_Double    M4_MediaTime;
-
-/*    GET_MEMORY32 macro definition
-    This macro is used by the 3GP reader*/
-#ifdef __BIG_ENDIAN
-#define GET_MEMORY32(x) (x)
-#else
-#define GET_MEMORY32(x) ( (((x)&0xff)<<24) | (((x)&0xff00)<<8) |\
-     (((x)&0xff0000)>>8) | (((x)&0xff000000)>>24) )
-#endif /*__BIG_ENDIAN*/
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __M4_UTILS_H__*/
-
diff --git a/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h b/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h
deleted file mode 100755
index 9e7d03f..0000000
--- a/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4_VideoEditingCommon.h
- * @brief    Video Editing (VSS3GPP, MCS, PTO3GPP) common definitions
- * @note
- ******************************************************************************
-*/
-
-#ifndef __M4_VIDEOEDITINGCOMMON_H__
-#define __M4_VIDEOEDITINGCOMMON_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- *    Version */
-/* CHANGE_VERSION_HERE */
-#define M4VIDEOEDITING_VERSION_MAJOR    3
-#define M4VIDEOEDITING_VERSION_MINOR    1
-#define M4VIDEOEDITING_VERSION_REVISION    0
-
-#define M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE 0x7fffffff
-#define M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL 0x7fffffff
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_FileType
- * @brief    This enum defines the file format type to be used
- ******************************************************************************
-*/
-typedef enum {
-    M4VIDEOEDITING_kFileType_3GPP        = 0,      /**< 3GPP file media type : input & output */
-    M4VIDEOEDITING_kFileType_MP4         = 1,      /**< MP4  file media type : input          */
-    M4VIDEOEDITING_kFileType_AMR         = 2,      /**< AMR  file media type : input & output */
-    M4VIDEOEDITING_kFileType_MP3         = 3,      /**< MP3  file media type : input          */
-    M4VIDEOEDITING_kFileType_PCM         = 4,      /**< PCM RAW file media type : input    RC */
-    M4VIDEOEDITING_kFileType_JPG         = 5,      /**< STILL PICTURE FEATURE: JPG file media
-                                                        type : input AND OUTPUT */
-    M4VIDEOEDITING_kFileType_BMP         = 6,      /**< STILL PICTURE FEATURE: BMP file media
-                                                        type : input only */
-    M4VIDEOEDITING_kFileType_GIF         = 7,      /**< STILL PICTURE FEATURE: GIF file media
-                                                        type : input only */
-    M4VIDEOEDITING_kFileType_PNG         = 8,      /**< STILL PICTURE FEATURE: PNG file media
-                                                        type : input only */
-    M4VIDEOEDITING_kFileType_ARGB8888    = 9,      /**< STILL PICTURE FEATURE: ARGB8888 file
-                                                        media type : input only */
-    M4VIDEOEDITING_kFileType_M4V         = 10,     /**< M4V  file media type : input only     */
-    M4VIDEOEDITING_kFileType_Unsupported = 255     /**< Unsupported file media type           */
-} M4VIDEOEDITING_FileType;
-
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_VideoFormat
- * @brief    This enum defines the avalaible video compression formats.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VIDEOEDITING_kNoneVideo = 0, /**< Video not present */
-    M4VIDEOEDITING_kH263 = 1, /**< H263 video */
-    M4VIDEOEDITING_kH264 = 2,    /**< H264 video */
-    M4VIDEOEDITING_kMPEG4 = 3, /**< MPEG-4 video */
-    M4VIDEOEDITING_kNullVideo = 254,  /**< Do not care video type, use NULL encoder */
-    M4VIDEOEDITING_kUnsupportedVideo = 255    /**< Unsupported video stream type */
-} M4VIDEOEDITING_VideoFormat;
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_AudioFormat
- * @brief    This enum defines the avalaible audio format.
- * @note    HE_AAC, HE_AAC_v2 and MP3 can not be used for the output audio format
- ******************************************************************************
-*/
-typedef enum {
-    M4VIDEOEDITING_kNoneAudio            = 0,    /**< Audio not present */
-    M4VIDEOEDITING_kAMR_NB              = 1,    /**< AMR Narrow Band audio */
-    M4VIDEOEDITING_kAAC                    = 2,    /**< AAC audio */
-    M4VIDEOEDITING_kAACplus                = 3,    /**< AAC+ audio */
-    M4VIDEOEDITING_keAACplus             = 4,    /**< Enhanced AAC+ audio */
-    M4VIDEOEDITING_kMP3                 = 5,    /**< MP3 audio */
-    M4VIDEOEDITING_kEVRC                = 6,    /**< EVRC audio */
-    M4VIDEOEDITING_kPCM                 = 7,    /**< PCM audio */
-    M4VIDEOEDITING_kNullAudio           = 254,  /**< Do not care audio type, use NULL encoder */
-    M4VIDEOEDITING_kUnsupportedAudio    = 255    /**< Unsupported audio stream type */
-} M4VIDEOEDITING_AudioFormat;
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_VideoFrameSize
- * @brief    This enum defines the available output frame sizes.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VIDEOEDITING_kSQCIF=0, /**< SQCIF 128x96  */
-    M4VIDEOEDITING_kQQVGA,   /**< QQVGA 160x120 */
-    M4VIDEOEDITING_kQCIF,    /**< QCIF  176x144 */
-    M4VIDEOEDITING_kQVGA,    /**< QVGA  320x240 */
-    M4VIDEOEDITING_kCIF,     /**< CIF   352x288 */
-    M4VIDEOEDITING_kVGA,     /**< VGA   640x480 */
-/* +PR LV5807 */
-    M4VIDEOEDITING_kWVGA,    /**< WVGA 800x480 */
-    M4VIDEOEDITING_kNTSC,    /**< NTSC 720x480 */
-/* -PR LV5807 */
-
-/* +CR Google */
-    M4VIDEOEDITING_k640_360,  /**< 640x360 */
-    M4VIDEOEDITING_k854_480,  /**< 854x480 */
-    M4VIDEOEDITING_k1280_720, /**< 720p 1280x720 */
-    M4VIDEOEDITING_k1080_720, /**< 720p 1080x720 */
-    M4VIDEOEDITING_k960_720,  /**< 720p 960x720 */
-    M4VIDEOEDITING_k1920_1080 /**<1080p 1920x1080*/
-/* -CR Google */
-
-} M4VIDEOEDITING_VideoFrameSize;
-
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_Videoframerate
- * @brief    This enum defines the available video framerates.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VIDEOEDITING_k5_FPS = 0,
-    M4VIDEOEDITING_k7_5_FPS,
-    M4VIDEOEDITING_k10_FPS,
-    M4VIDEOEDITING_k12_5_FPS,
-    M4VIDEOEDITING_k15_FPS,
-    M4VIDEOEDITING_k20_FPS,
-    M4VIDEOEDITING_k25_FPS,
-    M4VIDEOEDITING_k30_FPS
-} M4VIDEOEDITING_VideoFramerate;
-
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_AudioSamplingFrequency
- * @brief    This enum defines the available output audio sampling frequencies
- * @note    8 kHz is the only supported frequency for AMR-NB output
- * @note    16 kHz is the only supported frequency for AAC output
- * @note    The recommended practice is to use the Default value when setting the encoding parameters
- ******************************************************************************
-*/
-typedef enum {
-    M4VIDEOEDITING_kDefault_ASF    = 0,    /**< Default Audio Sampling Frequency for selected
-                                                 Audio output format */
-    M4VIDEOEDITING_k8000_ASF    = 8000,    /**< Note: Default audio Sampling Frequency for
-                                                    AMR-NB output */
-    M4VIDEOEDITING_k11025_ASF    = 11025,
-    M4VIDEOEDITING_k12000_ASF    = 12000,
-    M4VIDEOEDITING_k16000_ASF    = 16000,    /**< Note: Default audio Sampling Frequency
-                                                     for AAC output */
-    M4VIDEOEDITING_k22050_ASF    = 22050,
-    M4VIDEOEDITING_k24000_ASF    = 24000,
-    M4VIDEOEDITING_k32000_ASF    = 32000,
-    M4VIDEOEDITING_k44100_ASF    = 44100,
-    M4VIDEOEDITING_k48000_ASF    = 48000
-
-} M4VIDEOEDITING_AudioSamplingFrequency;
-
-
-/**
- ******************************************************************************
- * enum        M4VIDEOEDITING_Bitrate
- * @brief    This enum defines the available audio or video bitrates.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VIDEOEDITING_kVARIABLE_KBPS = -1,     /* no regulation */
-    M4VIDEOEDITING_kUndefinedBitrate = 0,   /* undefined */
-    M4VIDEOEDITING_k8_KBPS = 8000,
-    M4VIDEOEDITING_k9_2_KBPS = 9200,        /* evrc only */
-    M4VIDEOEDITING_k12_2_KBPS = 12200,      /* amr only */
-    M4VIDEOEDITING_k16_KBPS = 16000,
-    M4VIDEOEDITING_k24_KBPS = 24000,
-    M4VIDEOEDITING_k32_KBPS = 32000,
-    M4VIDEOEDITING_k40_KBPS = 40000,
-    M4VIDEOEDITING_k48_KBPS = 48000,
-    M4VIDEOEDITING_k56_KBPS = 56000,
-    M4VIDEOEDITING_k64_KBPS = 64000,
-    M4VIDEOEDITING_k80_KBPS = 80000,
-    M4VIDEOEDITING_k96_KBPS = 96000,
-    M4VIDEOEDITING_k112_KBPS = 112000,
-    M4VIDEOEDITING_k128_KBPS = 128000,
-    M4VIDEOEDITING_k160_KBPS = 160000,
-    M4VIDEOEDITING_k192_KBPS = 192000,
-    M4VIDEOEDITING_k224_KBPS = 224000,
-    M4VIDEOEDITING_k256_KBPS = 256000,
-    M4VIDEOEDITING_k288_KBPS = 288000,
-    M4VIDEOEDITING_k320_KBPS = 320000,
-    M4VIDEOEDITING_k384_KBPS = 384000,
-    M4VIDEOEDITING_k512_KBPS = 512000,
-    M4VIDEOEDITING_k800_KBPS = 800000,
-/*+ New Encoder bitrates */
-    M4VIDEOEDITING_k2_MBPS = 2000000,
-    M4VIDEOEDITING_k5_MBPS = 5000000,
-    M4VIDEOEDITING_k8_MBPS = 8000000,
-/*- New Encoder bitrates */
-} M4VIDEOEDITING_Bitrate;
-
-
-/**
- ******************************************************************************
- * structure    M4VIDEOEDITING_FtypBox
- * @brief        Information to build the 'ftyp' atom
- ******************************************************************************
-*/
-#define M4VIDEOEDITING_MAX_COMPATIBLE_BRANDS 10
-typedef struct
-{
-    /* All brand fields are actually char[4] stored in big-endian integer format */
-
-    M4OSA_UInt32    major_brand;           /* generally '3gp4'            */
-    M4OSA_UInt32    minor_version;         /* generally '0000' or 'x.x '  */
-    M4OSA_UInt32    nbCompatibleBrands;    /* number of compatible brands */
-    M4OSA_UInt32    compatible_brands[M4VIDEOEDITING_MAX_COMPATIBLE_BRANDS]; /* array of
-                                                                         max compatible brands */
-
-} M4VIDEOEDITING_FtypBox;
-
-/* Some useful brands */
-#define M4VIDEOEDITING_BRAND_0000  0x00000000
-#define M4VIDEOEDITING_BRAND_3G2A  0x33673261
-#define M4VIDEOEDITING_BRAND_3GP4  0x33677034
-#define M4VIDEOEDITING_BRAND_3GP5  0x33677035
-#define M4VIDEOEDITING_BRAND_3GP6  0x33677036
-#define M4VIDEOEDITING_BRAND_AVC1  0x61766331
-#define M4VIDEOEDITING_BRAND_EMP   0x656D7020
-#define M4VIDEOEDITING_BRAND_ISOM  0x69736F6D
-#define M4VIDEOEDITING_BRAND_MP41  0x6D703431
-#define M4VIDEOEDITING_BRAND_MP42  0x6D703432
-#define M4VIDEOEDITING_BRAND_VFJ1  0x76666A31
-
-/**
- ******************************************************************************
- * enum     M4VIDEOEDITING_ClipProperties
- * @brief   This structure gathers the information related to an input file
- ******************************************************************************
-*/
-typedef struct {
-
-    /**
-     * Common */
-    M4OSA_Bool                          bAnalysed;           /**< Flag to know if the file has
-                                                                  been already analysed or not */
-    M4OSA_UInt8                         Version[3];          /**< Version of the libraries used to
-                                                                  perform the clip analysis */
-    M4OSA_UInt32                        uiClipDuration;      /**< Clip duration (in ms) */
-    M4VIDEOEDITING_FileType             FileType;            /**< .3gp, .amr, .mp3 */
-    M4VIDEOEDITING_FtypBox              ftyp;                /**< 3gp 'ftyp' atom, major_brand =
-                                                                    0 if not used */
-
-    /**
-     * Video */
-    M4VIDEOEDITING_VideoFormat          VideoStreamType;     /**< Format of the video stream */
-    M4OSA_UInt32                        uiClipVideoDuration; /**< Video track duration (in ms) */
-    M4OSA_UInt32                        uiVideoBitrate;      /**< Video average bitrate (in bps)*/
-    M4OSA_UInt32                        uiVideoMaxAuSize;    /**< Maximum Access Unit size of the
-                                                                  video stream */
-    M4OSA_UInt32                        uiVideoWidth;        /**< Video frame width */
-    M4OSA_UInt32                        uiVideoHeight;       /**< Video frame height */
-    M4OSA_UInt32                        uiVideoTimeScale;    /**< Video time scale */
-    M4OSA_Float                         fAverageFrameRate;   /**< Average frame rate of the video
-                                                                  stream */
-    M4OSA_Int32 uiVideoLevel;   /**< video level*/
-    M4OSA_Int32 uiVideoProfile; /**< video profile */
-
-    M4OSA_Bool                          bMPEG4dataPartition; /**< MPEG-4 uses data partitioning */
-    M4OSA_Bool                          bMPEG4rvlc;          /**< MPEG-4 uses RVLC tool */
-    M4OSA_Bool                          bMPEG4resynchMarker; /**< MPEG-4 stream uses Resynch
-                                                                   Marker */
-
-    /**
-     * Audio */
-    M4VIDEOEDITING_AudioFormat          AudioStreamType;     /**< Format of the audio stream */
-    M4OSA_UInt32                        uiClipAudioDuration; /**< Audio track duration (in ms) */
-    M4OSA_UInt32                        uiAudioBitrate;      /**< Audio average bitrate (in bps) */
-    M4OSA_UInt32                        uiAudioMaxAuSize;    /**< Maximum Access Unit size of the
-                                                                    audio stream */
-    M4OSA_UInt32                        uiNbChannels;        /**< Number of channels
-                                                                    (1=mono, 2=stereo) */
-    M4OSA_UInt32                        uiSamplingFrequency; /**< Sampling audio frequency
-                                                           (8000 for amr, 16000 or more for aac) */
-    M4OSA_UInt32                        uiExtendedSamplingFrequency; /**< Extended frequency for
-                                                                         AAC+, eAAC+ streams */
-    M4OSA_UInt32                        uiDecodedPcmSize;    /**< Size of the decoded PCM data */
-
-    /**
-     * Video editing compatibility chart */
-    M4OSA_Bool      bVideoIsEditable;                        /**< Video stream can be decoded and
-                                                                 re-encoded */
-    M4OSA_Bool      bAudioIsEditable;                        /**< Audio stream can be decoded and
-                                                                  re-encoded */
-    M4OSA_Bool      bVideoIsCompatibleWithMasterClip;        /**< Video properties match reference
-                                                                  clip properties */
-    M4OSA_Bool      bAudioIsCompatibleWithMasterClip;        /**< Audio properties match reference
-                                                                   clip properties */
-
-    /**
-     * Still Picture */
-    M4OSA_UInt32                        uiStillPicWidth;        /**< Image width */
-    M4OSA_UInt32                        uiStillPicHeight;       /**< Image height */
-    M4OSA_UInt32                        uiClipAudioVolumePercentage;
-    M4OSA_Bool                          bSetImageData;
-
-    M4OSA_Int32     videoRotationDegrees;        /**< Video rotation degree */
-
-} M4VIDEOEDITING_ClipProperties;
-
-
-#ifdef __cplusplus
-    }
-#endif
-
-#endif /* __M4_VIDEOEDITINGCOMMON_H__ */
-
diff --git a/libvideoeditor/vss/common/inc/MonoTo2I_16.h b/libvideoeditor/vss/common/inc/MonoTo2I_16.h
deleted file mode 100755
index 74b1c8a..0000000
--- a/libvideoeditor/vss/common/inc/MonoTo2I_16.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _MONOTO2I_16_H_
-#define _MONOTO2I_16_H_
-
-
-void MonoTo2I_16(  const short *src,
-                         short *dst,
-                         short n);
-
-/**********************************************************************************/
-
-#endif  /* _MONOTO2I_16_H_ */
-
-/**********************************************************************************/
-
diff --git a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h
deleted file mode 100755
index e1b62e1..0000000
--- a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef NXPSW_COMPILERSWITCHES_MCS_H
-#define NXPSW_COMPILERSWITCHES_MCS_H
-
-                            /***********/
-                            /* READERS */
-                            /***********/
-
-/* -----  AMR reader support ----- */
-#define M4VSS_SUPPORT_READER_AMR        /**< [default] Support .amr files */
-
-/* ----- 3GPP  reader support ----- */
-#define M4VSS_SUPPORT_READER_3GP        /**< [default] Support .mp4, .3gp files */
-
-
-/* ----- MP3 reader support ----- */
-#define M4VSS_SUPPORT_READER_MP3        /**< [default] Support .mp3 files */
-
-/* ----- RAW reader support ----- */
-#define M4VSS_SUPPORT_READER_PCM        /**< [default] Support .pcm files */
-
-
-                            /************/
-                            /* DECODERS */
-                            /************/
-
-/* -----  AMR NB decoder support ----- */
-#define M4VSS_SUPPORT_AUDEC_AMRNB       /**< [default] Support AMR NB streams */
-
-/* ----- AAC decoder support ----- */
-#define M4VSS_SUPPORT_AUDEC_AAC            /**< [default] Support AAC, AAC+ and eAAC+ streams */
-#define M4VSS_SUPPORT_VIDEC_NULL
-
-/* ----- MP4/H263 video decoder support ----- */
-#define M4VSS_SUPPORT_VIDEC_3GP         /**< [default] Support mpeg4 and H263 decoders */
-
-#ifdef M4VSS_SUPPORT_VIDEC_3GP
-#define GET_DECODER_CONFIG_INFO
-#endif
-
-#define M4VSS_SUPPORT_VIDEO_AVC            /**< [default] Support H264 decoders */
-
-/* ----- MP3 decoder support----- */
-#define M4VSS_SUPPORT_AUDEC_MP3         /**< [default] Support MP3 decoders */
-
-
-/* ----- NULL decoder support----- */
-#define M4VSS_SUPPORT_AUDEC_NULL        /** [default] Support PCM reading */
-
-
-                            /***********/
-                            /* WRITERS */
-                            /***********/
-
-/* ----- 3gp writer ----- */
-#define M4VSS_SUPPORT_WRITER_3GPP       /**< [default] support encapsulating in 3gp format
-                                             {amr,aac} x {mpeg4,h263} */
-
-
-
-
-
-                            /************/
-                            /* ENCODERS */
-                            /************/
-
-/* ----- mpeg4 & h263 encoder ----- */
-#define M4VSS_SUPPORT_ENCODER_MPEG4     /**< [default] support encoding in mpeg4 and
-                                             h263 format {yuv,rgb} */
-
-/* ----- h264 encoder ----- */
-#define M4VSS_SUPPORT_ENCODER_AVC
-
-/* ----- amr encoder ----- */
-#define M4VSS_SUPPORT_ENCODER_AMR  /**< [default] support encoding in amr 12.2 format {amr,wav} */
-
-/* ----- aac encoder ----- */
-#define M4VSS_SUPPORT_ENCODER_AAC       /**< [default] support encoding in aac format {amr,wav} */
-
-
-/* ----- mp3 encoder ----- */
-#define M4VSS_SUPPORT_ENCODER_MP3       /**< [default] support encoding in mp3 format {mp3} */
-
-                            /************/
-                            /* FEATURES */
-                            /************/
-
-/* ----- VSS3GPP & xVSS ----- */
-#define M4VSS_SUPPORT_EXTENDED_FEATURES /**< [default] if defined, implementation is xVSS else
-                                            it is VSS3GPP */
-
-/* ----- SPS ----- */
-#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
-
-//#define M4SPS_GIF_NOT_SUPPORTED  /**< [option] do not support GIF format in still picture api */
-//#define M4SPS_JPEG_NOT_SUPPORTED /**< [option] do not support JPEG format in still picture api */
-//#define M4SPS_PNG_NOT_SUPPORTED  /**< [option] do not support PNG format in still picture api */
-#define M4SPS_WBMP_NOT_SUPPORTED   /**< [option] do not support WBMP format in still picture api */
-#define M4SPS_BGR565_COLOR_OUTPUT  /**< [option] output in still picture api is BGR565
-                                        (default = BGR24) */
-
-#else
-
-#define M4SPS_GIF_NOT_SUPPORTED    /**< [option] do not support GIF format in still picture api */
-//#define M4SPS_JPEG_NOT_SUPPORTED /**< [option] do not support JPEG format in still picture api */
-#define M4SPS_PNG_NOT_SUPPORTED    /**< [option] do not support PNG format in still picture api */
-#define M4SPS_WBMP_NOT_SUPPORTED   /**< [option] do not support WBMP format in still picture api */
-//#define M4SPS_BGR565_COLOR_OUTPUT /**< [option] output in still picture api is BGR565
-//                                          (default = BGR24) */
-
-#endif
-
-#define M4VSS_ENABLE_EXTERNAL_DECODERS
-
-#define M4VSS_SUPPORT_OMX_CODECS
-
-#endif /* NXPSW_COMPILERSWITCHES_MCS_H */
-
diff --git a/libvideoeditor/vss/common/inc/SSRC.h b/libvideoeditor/vss/common/inc/SSRC.h
deleted file mode 100755
index 2b1cfcf..0000000
--- a/libvideoeditor/vss/common/inc/SSRC.h
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/****************************************************************************************/
-/*                                                                                      */
-/*     Project::                                                                        */
-/*     %name:          SSRC.h % */
-/*                                                                                      */
-/****************************************************************************************/
-
-/*
-    The input and output blocks of the SRC are by default blocks of 40 ms.  This means that
-    the following default block sizes are used:
-
-          Fs     Default Block size
-        -----        ----------
-         8000           320
-        11025           441
-        12000           480
-        16000           640
-        22050           882
-        24000           960
-        32000          1280
-        44100          1764
-        48000          1920
-
-    An API is provided to change the default block size into any multiple of the minimal
-    block size.
-
-    All the sampling rates above are supported as input and as output sampling rate
-*/
-
-#ifndef __SSRC_H__
-#define __SSRC_H__
-
-/****************************************************************************************
-   INCLUDES
-*****************************************************************************************/
-
-#include "LVM_Types.h"
-
-/****************************************************************************************
-   DEFINITIONS
-*****************************************************************************************/
-
-#define SSRC_INSTANCE_SIZE          548
-#define SSRC_INSTANCE_ALIGNMENT     4
-#define SSRC_SCRATCH_ALIGNMENT      4
-
-/****************************************************************************************
-   TYPE DEFINITIONS
-*****************************************************************************************/
-
-/* Status return values */
-typedef enum
-{
-    SSRC_OK                     = 0,                /* Successful return from a routine */
-    SSRC_INVALID_FS             = 1,                /* The input or the output sampling rate is
-                                                        invalid */
-    SSRC_INVALID_NR_CHANNELS    = 2,                /* The number of channels is not equal to mono
-                                                         or stereo */
-    SSRC_NULL_POINTER           = 3,                /* One of the input pointers is NULL */
-    SSRC_WRONG_NR_SAMPLES       = 4,                /* Invalid number of samples */
-    SSRC_ALLINGMENT_ERROR       = 5,                /* The instance memory or the scratch memory
-                                                        is not alligned */
-    SSRC_INVALID_MODE           = 6,                /* A wrong value has been used for the mode
-                                                        parameter */
-    SSRC_INVALID_VALUE          = 7,                /* An invalid (out of range) value has been
-                                                     used for one of the parameters */
-    LVXXX_RETURNSTATUS_DUMMY = LVM_MAXENUM
-} SSRC_ReturnStatus_en;
-
-/* Instance memory */
-typedef struct
-{
-    LVM_INT32 Storage [ SSRC_INSTANCE_SIZE/4 ];
-} SSRC_Instance_t;
-
-/* Scratch memory */
-typedef LVM_INT32 SSRC_Scratch_t;
-
-/* Nuber of samples mode */
-typedef enum
-{
-    SSRC_NR_SAMPLES_DEFAULT     = 0,
-    SSRC_NR_SAMPLES_MIN         = 1,
-    SSRC_NR_SAMPLES_DUMMY       = LVM_MAXENUM
-} SSRC_NR_SAMPLES_MODE_en;
-
-/* Instance parameters */
-typedef struct
-{
-    LVM_Fs_en           SSRC_Fs_In;
-    LVM_Fs_en           SSRC_Fs_Out;
-    LVM_Format_en       SSRC_NrOfChannels;
-    LVM_INT16           NrSamplesIn;
-    LVM_INT16           NrSamplesOut;
-} SSRC_Params_t;
-
-
-/****************************************************************************************
-   FUNCTION PROTOTYPES
-*****************************************************************************************/
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/* FUNCTION:                SSRC_GetNrSamples                                           */
-/*                                                                                      */
-/* DESCRIPTION:                                                                         */
-/*  This function retrieves the number of samples (or sample pairs for stereo) to be    */
-/*  used as input and as output of the SSRC module.                                     */
-/*                                                                                      */
-/* PARAMETERS:                                                                          */
-/*  Mode                    There are two modes:                                        */
-/*                              - SSRC_NR_SAMPELS_DEFAULT.  In this mode, the function  */
-/*                                will return the number of samples for 40 ms blocks    */
-/*                              - SSRC_NR_SAMPELS_MIN will return the minimal number    */
-/*                                of samples that is supported for this conversion      */
-/*                                ratio.  Each integer multiple of this ratio will      */
-/*                                be accepted by the SSRC_Init function                 */
-/*                                                                                      */
-/*  pSSRC_Params            pointer to the instance parameters                          */
-/*                                                                                      */
-/* RETURNS:                                                                             */
-/*  SSRC_OK                 Succeeded                                                   */
-/*  SSRC_INVALID_FS         When the requested input or output sampling rates           */
-/*                          are invalid.                                                */
-/*  SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO           */
-/*                          or LVM_STEREO                                               */
-/*  SSRC_NULL_POINTER       When pSSRC_Params is a NULL pointer                         */
-/*  SSRC_INVALID_MODE       When Mode is not a valid setting                            */
-/*                                                                                      */
-/*                                                                                      */
-/* NOTES:                                                                               */
-/*                                                                                      */
-/****************************************************************************************/
-
-SSRC_ReturnStatus_en SSRC_GetNrSamples( SSRC_NR_SAMPLES_MODE_en  Mode,
-                                        SSRC_Params_t*           pSSRC_Params );
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/* FUNCTION:                SSRC_GetScratchSize                                         */
-/*                                                                                      */
-/* DESCRIPTION:                                                                         */
-/*  This function retrieves the scratch size for a given conversion ratio and           */
-/*  for given buffer sizes at the input and at the output                               */
-/*                                                                                      */
-/* PARAMETERS:                                                                          */
-/*  pSSRC_Params            pointer to the instance parameters                          */
-/*  pScratchSize            pointer to the scratch size.  The SSRC_GetScratchSize       */
-/*                          function will fill in the correct value (in bytes).         */
-/*                                                                                      */
-/* RETURNS:                                                                             */
-/*  SSRC_OK                 when the function call succeeds                             */
-/*  SSRC_INVALID_FS         When the requested input or output sampling rates           */
-/*                          are invalid.                                                */
-/*  SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO           */
-/*                          or LVM_STEREO                                               */
-/*  SSRC_NULL_POINTER       When any of the input pointers is a NULL pointer            */
-/*  SSRC_WRONG_NR_SAMPLES   When the number of samples on the input or on the output    */
-/*                          are incorrect                                               */
-/*                                                                                      */
-/* NOTES:                                                                               */
-/*                                                                                      */
-/****************************************************************************************/
-
-SSRC_ReturnStatus_en SSRC_GetScratchSize(   SSRC_Params_t*    pSSRC_Params,
-                                            LVM_INT32*        pScratchSize );
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/* FUNCTION:                SSRC_Init                                                   */
-/*                                                                                      */
-/* DESCRIPTION:                                                                         */
-/*  This function is used to initialize the SSRC module instance.                       */
-/*                                                                                      */
-/* PARAMETERS:                                                                          */
-/*  pSSRC_Instance          Instance pointer                                            */
-/*                                                                                      */
-/*  pSSRC_Scratch           pointer to the scratch memory                               */
-/*  pSSRC_Params            pointer to the instance parameters                          */
-/*  pInputInScratch,        pointer to a location in the scratch memory that can be     */
-/*                          used to store the input samples (e.g. to save memory)       */
-/*  pOutputInScratch        pointer to a location in the scratch memory that can be     */
-/*                          used to store the output samples (e.g. to save memory)      */
-/*                                                                                      */
-/* RETURNS:                                                                             */
-/*  SSRC_OK                 Succeeded                                                   */
-/*  SSRC_INVALID_FS         When the requested input or output sampling rates           */
-/*                          are invalid.                                                */
-/*  SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO           */
-/*                          or LVM_STEREO                                               */
-/*  SSRC_WRONG_NR_SAMPLES   When the number of samples on the input or the output       */
-/*                          are incorrect                                               */
-/*  SSRC_NULL_POINTER       When any of the input pointers is a NULL pointer            */
-/*  SSRC_ALLINGMENT_ERROR   When the instance memory or the scratch memory is not       */
-/*                          4 bytes alligned                                            */
-/*                                                                                      */
-/* NOTES:                                                                               */
-/*  1. The init function will clear the internal state                                  */
-/*                                                                                      */
-/****************************************************************************************/
-
-SSRC_ReturnStatus_en SSRC_Init( SSRC_Instance_t* pSSRC_Instance,
-                                SSRC_Scratch_t*  pSSRC_Scratch,
-                                SSRC_Params_t*   pSSRC_Params,
-                                LVM_INT16**      ppInputInScratch,
-                                LVM_INT16**      ppOutputInScratch);
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/* FUNCTION:                SSRC_SetGains                                               */
-/*                                                                                      */
-/* DESCRIPTION:                                                                         */
-/*  This function sets headroom gain and the post gain of the SSRC                      */
-/*                                                                                      */
-/* PARAMETERS:                                                                          */
-/*  bHeadroomGainEnabled    parameter to enable or disable the headroom gain of the     */
-/*                          SSRC.  The default value is LVM_MODE_ON.  LVM_MODE_OFF      */
-/*                          can be used in case it can be guaranteed that the input     */
-/*                          level is below -6dB in all cases (the default headroom      */
-/*                          is -6 dB)                                                   */
-/*                                                                                      */
-/*  bOutputGainEnabled      parameter to enable or disable the output gain.  The        */
-/*                          default value is LVM_MODE_ON                                */
-/*                                                                                      */
-/*  OutputGain              the value of the output gain.  The output gain is a linear  */
-/*                          gain value. 0x7FFF is equal to +6 dB and 0x0000 corresponds */
-/*                          to -inf dB.  By default, a 3dB gain is applied, resulting   */
-/*                          in an overall gain of -3dB (-6dB headroom + 3dB output gain)*/
-/*                                                                                      */
-/* RETURNS:                                                                             */
-/*  SSRC_OK                 Succeeded                                                   */
-/*  SSRC_NULL_POINTER       When pSSRC_Instance is a NULL pointer                       */
-/*  SSRC_INVALID_MODE       Wrong value used for the bHeadroomGainEnabled or the        */
-/*                          bOutputGainEnabled parameters.                              */
-/*  SSRC_INVALID_VALUE      When OutputGain is out to the range [0;32767]               */
-/*                                                                                      */
-/* NOTES:                                                                               */
-/*  1. The SSRC_SetGains function is an optional function that should only be used      */
-/*     in rare cases.  Preferably, use the default settings.                            */
-/*                                                                                      */
-/****************************************************************************************/
-
-SSRC_ReturnStatus_en SSRC_SetGains( SSRC_Instance_t* pSSRC_Instance,
-                                    LVM_Mode_en      bHeadroomGainEnabled,
-                                    LVM_Mode_en      bOutputGainEnabled,
-                                    LVM_INT16        OutputGain );
-
-
-/****************************************************************************************/
-/*                                                                                      */
-/* FUNCTION:                SSRC_Process                                                */
-/*                                                                                      */
-/* DESCRIPTION:                                                                         */
-/*  Process function for the SSRC module.                                               */
-/*                                                                                      */
-/* PARAMETERS:                                                                          */
-/*  pSSRC_Instance          Instance pointer                                            */
-/*  pSSRC_AudioIn           Pointer to the input data                                   */
-/*  pSSRC_AudioOut          Pointer to the output data                                  */
-/*                                                                                      */
-/* RETURNS:                                                                             */
-/* SSRC_OK                  Succeeded                                                   */
-/* SSRC_NULL_POINTER        When one of pSSRC_Instance, pSSRC_AudioIn or pSSRC_AudioOut */
-/*                          is NULL                                                     */
-/*                                                                                      */
-/* NOTES:                                                                               */
-/*                                                                                      */
-/****************************************************************************************/
-
-SSRC_ReturnStatus_en SSRC_Process(  SSRC_Instance_t* pSSRC_Instance,
-                                    LVM_INT16*       pSSRC_AudioIn,
-                                    LVM_INT16*       pSSRC_AudioOut);
-
-/****************************************************************************************/
-
-#endif /* __SSRC_H__ */
diff --git a/libvideoeditor/vss/common/inc/VideoEditorResampler.h b/libvideoeditor/vss/common/inc/VideoEditorResampler.h
deleted file mode 100755
index b8497d3..0000000
--- a/libvideoeditor/vss/common/inc/VideoEditorResampler.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VIDEOEDITORRESAMPLER_H
-#define VIDEOEDITORRESAMPLER_H
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "M4OSA_Types.h"
-
-M4OSA_Context LVAudioResamplerCreate(M4OSA_Int32 bitDepth, M4OSA_Int32 inChannelCount,
-                                     M4OSA_Int32 sampleRate, M4OSA_Int32 quality);
-void LVAudiosetSampleRate(M4OSA_Context resamplerContext,M4OSA_Int32 inSampleRate);
-void LVAudiosetVolume(M4OSA_Context resamplerContext, M4OSA_Int16 left, M4OSA_Int16 right) ;
-void LVAudioresample_LowQuality(M4OSA_Int16* out, M4OSA_Int16* input,
-                                     M4OSA_Int32 outFrameCount, M4OSA_Context resamplerContext);
-void LVDestroy(M4OSA_Context resamplerContext);
-
-void MonoTo2I_16( const M4OSA_Int16 *src,
-                        M4OSA_Int16 *dst,
-                        M4OSA_Int16 n);
-
-void From2iToMono_16( const M4OSA_Int16 *src,
-                            M4OSA_Int16 *dst,
-                            M4OSA_Int16 n);
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-#endif /* VIDEOEDITORRESAMPLER_H */
-
-
diff --git a/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h b/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h
deleted file mode 100755
index e23c02a..0000000
--- a/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4EXIFC_CommonAPI.h
- * @brief    EXIF common data header
- * @note    The types, structures and macros defined in this file allow reading
- *            and writing EXIF JPEG images compliant spec EXIF 2.2
- ******************************************************************************
-*/
-
-
-#ifndef __M4_EXIF_COMMON_API_H__
-#define __M4_EXIF_COMMON_API_H__
-
-#include "M4TOOL_VersionInfo.h"
-#include "M4Common_types.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Types.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_CoreID.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- ************************************************************************
- * type M4EXIFC_Context
- ************************************************************************
-*/
-typedef M4OSA_Void*    M4EXIFC_Context;
-
-/**
- ******************************************************************************
- * Errors & Warnings
- ******************************************************************************
-*/
-
-#define M4EXIFC_NO_ERR              0x00000000    /**< invalid parameter */
-#define M4EXIFC_ERR_PARAMETER       0x00000001    /**< invalid parameter */
-#define M4EXIFC_ERR_ALLOC           0x00000002    /**< allocation error */
-#define M4EXIFC_ERR_BAD_CONTEXT     0x00000003    /**< invalid context */
-#define M4EXIFC_ERR_NOT_COMPLIANT   0x00000004    /**< the image in buffer is not
-                                                       JPEG compliant */
-#define M4EXIFC_ERR_NO_APP_FOUND    0x00000005    /**< the JPEG image does not contain any APP1
-                                                        Exif 2.2 compliant */
-#define M4EXIFC_WAR_NO_THUMBNAIL    0x00000006    /**< the Exif part does not contain any
-                                                        thumbnail */
-#define M4EXIFC_ERR_APP_TRUNCATED   0x00000007    /**< The APP1 section in input buffer is
-                                                        not complete */
-
-
-/**
- ******************************************************************************
- * structure    M4EXIFC_BasicTags
- * @brief        This structure stores the basic tags values.
- * @note        This Exif reader focuses on a set of "Entry Tags".
- *                This structure contains the corresponding "Entry Values" of these tags.
- *                M4EXIFC_Char* fields of structure are Null terminated Strings.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Int32        width;                /**< image width in pixels */
-    M4OSA_Int32        height;               /**< image height in pixels */
-    M4OSA_Char        *creationDateTime;     /**< date and time original image was generated */
-    M4OSA_Char        *lastChangeDateTime;   /**< file change date and time */
-    M4OSA_Char        *description;          /**< image title */
-    M4OSA_Char        *make;                 /**< manufacturer of image input equipment */
-    M4OSA_Char        *model;                /**< model of image input equipment */
-    M4OSA_Char        *software;             /**< software used */
-    M4OSA_Char        *artist;               /**< person who created the image */
-    M4OSA_Char        *copyright;            /**< copyright holder */
-    M4COMMON_Orientation orientation;        /**< orientation of image */
-    M4OSA_Int32        thumbnailSize;        /**< size of the thumbnail */
-    M4OSA_UInt8        *thumbnailImg;        /**< pointer to the thumbnail in main image buffer*/
-    M4OSA_Char        *latitudeRef;          /**< latitude reference */
-    M4COMMON_Location latitude;              /**< latitude */
-    M4OSA_Char        *longitudeRef;         /**< longitude reference */
-    M4COMMON_Location longitude;             /**< longitude */
-
-} M4EXIFC_BasicTags;
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4EXIFC_getVersion    (M4_VersionInfo *pVersion)
- * @brief    get the version numbers of the exif library.
- * @note    This function retrieves the version numbers in a structure.
- * @param    pVersion:    (OUT)        the structure containing version numbers
- * @return    M4NO_ERROR:                there is no error
- * @return    M4EXIFC_ERR_PARAMETER:        (Debug only) the parameter is M4EXIFC_NULL.
- ******************************************************************************
-*/
-M4OSA_ERR M4EXIFC_getVersion (M4_VersionInfo *pVersion);
-
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus*/
-#endif /* __M4_EXIF_COMMON_API_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_API.h b/libvideoeditor/vss/inc/M4PTO3GPP_API.h
deleted file mode 100755
index 4aa20d2..0000000
--- a/libvideoeditor/vss/inc/M4PTO3GPP_API.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4PTO3GPP_API.h
- * @brief    The Pictures to 3GPP Converter.
- * @note    M4PTO3GPP produces 3GPP compliant audio/video  files
- *            from an AMR NB audio file and raw pictures into a MPEG-4/h263 3GPP file.
- ******************************************************************************
- */
-
-#ifndef __M4PTO3GPP_API_H__
-#define __M4PTO3GPP_API_H__
-
-/**
- *    OSAL basic types and errors */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/**
- *    OSAL types for file access */
-#include "M4OSA_FileReader.h"
-#include "M4OSA_FileWriter.h"
-
-/**
- *    Definition of M4_VersionInfo */
-#include "M4TOOL_VersionInfo.h"
-
-/**
- * Definitions of M4VIFI_ImagePlane */
-#include "M4VIFI_FiltersAPI.h"
-
-/**
- * Common definitions of video editing components */
-#include "M4_VideoEditingCommon.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- *    Public type of the M4PTO3GPP context */
-typedef M4OSA_Void* M4PTO3GPP_Context;
-
-
-/**
- ******************************************************************************
- * enum        M4PTO3GPP_ReplaceAudioMode
- * @brief    This enumeration defines the way the audio is managed if it is shorter than the video
- ******************************************************************************
- */
-typedef enum
-{
-    M4PTO3GPP_kAudioPaddingMode_None = 0,  /**< Audio track is kept shorter than the video track*/
-    M4PTO3GPP_kAudioPaddingMode_Silence,   /**< If audio is shorter, silence is added at the end*/
-    M4PTO3GPP_kAudioPaddingMode_Loop       /**< If audio is shorter, loop back to the beginning
-                                                when the whole track has been processed */
-} M4PTO3GPP_AudioPaddingMode;
-
-
-/**
- ******************************************************************************
- * struct    M4PTO3GPP_OutputFileMaxSize
- * @brief    Defines the maximum size of the 3GPP file produced by the PTO3GPP
- ******************************************************************************
- */
-typedef enum
-{
-    M4PTO3GPP_k50_KB,            /**< Output 3GPP file size is limited to 50 Kbytes  */
-    M4PTO3GPP_k75_KB,            /**< Output 3GPP file size is limited to 75 Kbytes  */
-    M4PTO3GPP_k100_KB,           /**< Output 3GPP file size is limited to 100 Kbytes */
-    M4PTO3GPP_k150_KB,           /**< Output 3GPP file size is limited to 150 Kbytes */
-    M4PTO3GPP_k200_KB,           /**< Output 3GPP file size is limited to 200 Kbytes */
-    M4PTO3GPP_k300_KB,           /**< Output 3GPP file size is limited to 300 Kbytes */
-    M4PTO3GPP_k400_KB,           /**< Output 3GPP file size is limited to 400 Kbytes */
-    M4PTO3GPP_k500_KB,           /**< Output 3GPP file size is limited to 500 Kbytes */
-    M4PTO3GPP_kUNLIMITED=-1      /**< Output 3GPP file size is not limited           */
-} M4PTO3GPP_OutputFileMaxSize;
-
-/**
- ******************************************************************************
- * M4OSA_ERR (M4PTO3GPP_PictureCallbackFct) (M4OSA_Void* pPictureCtxt,
- * M4VIFI_ImagePlane* pImagePlanes, M4OSA_Double* pPictureDuration);
- * @brief    The integrator must implement a function following this prototype.
- *            Its goal is to feed the PTO3GPP with YUV420 pictures.
- *
- * @note    This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
- * @param    pContext    (IN) The integrator own context
- * @param    pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
- * @param    pPictureDuration(OUT) Duration of the returned picture
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null (bebug only)
- ******************************************************************************
- */
-typedef M4OSA_ERR (M4PTO3GPP_PictureCallbackFct) (M4OSA_Void* pPictureCtxt,
-                                                  M4VIFI_ImagePlane* pImagePlanes,
-                                                  M4OSA_Double* pPictureDuration);
-
-
-/**
- ******************************************************************************
- * struct    M4PTO3GPP_Params
- * @brief    M4PTO3GPP parameters definition
- ******************************************************************************
- */
-typedef struct
-{
-    /**< Output video compression format, H263 or MPEG4 */
-    M4VIDEOEDITING_VideoFormat      OutputVideoFormat;
-    /**< Output frame size : SQCIF to VGA*/
-    M4VIDEOEDITING_VideoFrameSize   OutputVideoFrameSize;
-    /**< Targeted Output bit-rate, see enum*/
-    M4VIDEOEDITING_Bitrate          OutputVideoBitrate;
-    /**< Maximum size of the output 3GPP file, see enum */
-    M4PTO3GPP_OutputFileMaxSize     OutputFileMaxSize;
-    /**< Callback function to be called by the PTO3GPP to get the input pictures*/
-    M4PTO3GPP_PictureCallbackFct*   pPictureCallbackFct;
-    /**< Context to be given as third argument of the picture callback function call*/
-    M4OSA_Void*                     pPictureCallbackCtxt;
-    /**< File descriptor of the input audio track file */
-    M4OSA_Void*                     pInputAudioTrackFile;
-    /**< Format of the audio file */
-    M4VIDEOEDITING_FileType         AudioFileFormat;
-    /**< Type of processing to apply when audio is shorter than video*/
-    M4PTO3GPP_AudioPaddingMode      AudioPaddingMode;
-    /**< File descriptor of the output 3GPP file */
-    M4OSA_Void*                     pOutput3gppFile;
-     /**< File descriptor of the temporary file to store metadata ("moov.bin") */
-    M4OSA_Void*                     pTemporaryFile;
-    /**< Number of input YUV frames to encode */
-    M4OSA_UInt32                    NbVideoFrames;
-    M4OSA_Int32   videoProfile;
-    M4OSA_Int32   videoLevel;
-} M4PTO3GPP_Params;
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
- * @brief    Get the M4PTO3GPP version.
- * @note    Can be called anytime. Do not need any context.
- * @param    pVersionInfo        (OUT) Pointer to a version info structure
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
- * @brief    Initializes the M4PTO3GPP (allocates an execution context).
- * @note
- * @param    pContext            (OUT) Pointer on the M4PTO3GPP context to allocate
- * @param   pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
- * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
- * @return    M4ERR_ALLOC:        The context structure could not be allocated
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
-                         M4OSA_FileWriterPointer* pFileWritePtrFct);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
- * @brief    Set the M4PTO3GPP input and output files.
- * @note    It opens the input file, but the output file may not be created yet.
- * @param    pContext            (IN) M4PTO3GPP context
- * @param    pParams                (IN) Pointer to the parameters for the PTO3GPP.
- * @note    The pointed structure can be de-allocated after this function returns because
- *            it is internally copied by the PTO3GPP
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        M4PTO3GPP is not in an appropriate state
- *                                for this function to be called
- * @return    M4ERR_ALLOC:        There is no more available memory
- * @return    ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
- *                                size parameter is incompatible with H263 encoding
- * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT
- *                          The output video format  parameter is undefined
- * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE
- *                        The output video bit-rate parameter is undefined
- * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE
- *                        The output video frame size parameter is undefined
- * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE
- *                          The output file size parameter is undefined
- * @return    ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING
- *                        The output audio padding parameter is undefined
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
- * @brief    Perform one step of trancoding.
- * @note
- * @param    pContext            (IN) M4PTO3GPP context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
- * @return    M4ERR_STATE:        M4PTO3GPP is not in an appropriate state
- *                                for this function to be called
- * @return    M4PTO3GPP_WAR_END_OF_PROCESSING:    Encoding completed
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
- * @brief    Finish the M4PTO3GPP transcoding.
- * @note    The output 3GPP file is ready to be played after this call
- * @param    pContext            (IN) M4PTO3GPP context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
- * @return    M4ERR_STATE:        M4PTO3GPP is not in an appropriate state
- *                                for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
- * @brief    Free all resources used by the M4PTO3GPP.
- * @note    The context is no more valid after this call
- * @param    pContext            (IN) M4PTO3GPP context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __M4PTO3GPP_API_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h
deleted file mode 100755
index 57bd54f..0000000
--- a/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4PTO3GPP_ErrorCodes.h
- * @brief    Picture to 3gpp Service error definitions.
- * @note
- ******************************************************************************
- */
-
-#ifndef __M4PTO3GPP_ErrorCodes_H__
-#define __M4PTO3GPP_ErrorCodes_H__
-
-/**
- *    OSAL basic types and errors */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/**
- *    OSAL core ID definitions */
-#include "M4OSA_CoreID.h"
-
-
-/**
- *    The output video format parameter is undefined */
-#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0001 )
-/**
- *    The output video frame size parameter is undefined */
-#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE        \
-    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0002 )
-/**
- *    The output video bit-rate parameter is undefined */
-#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE           \
-    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0003 )
-/**
- *    The output video frame size parameter is incompatible with H263 encoding */
-#define ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263        \
-    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0004 )
-/**
- *    The file size is undefined */
-#define ERR_PTO3GPP_INVALID_FILE_SIZE                M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0005 )
-/**
- * The input audio file contains a track format not handled by PTO3GPP */
-#define ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE         \
-    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0006 )
-/**
- *    The output video format parameter is undefined */
-#define ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0007 )
-
-/**
- *    The AMR decoder initialization failed */
-#define ERR_PTO3GPP_AMR_DECODER_INIT_ERROR           M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0020 )
-/**
- *    The AMR decoder failed */
-#define ERR_PTO3GPP_AMR_DECODE_ERROR                 M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0021 )
-/**
- *    The AMR decoder cleanup failed */
-#define ERR_PTO3GPP_AMR_DECODER_DESTROY_ERROR        M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0022 )
-
-/**
- *    The video encoder initialization failed */
-#define ERR_PTO3GPP_VIDEO_ENCODER_INIT_ERROR         M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0023 )
-/**
- *    The video encoder decoding failed */
-#define ERR_PTO3GPP_VIDEO_ENCODE_ERROR               M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0024 )
-/**
- *    The video encoder cleanup failed */
-#define ERR_PTO3GPP_VIDEO_ENCODER_DESTROY_ERROR      M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0025 )
-
-/**
- *    The output file size parameter is undefined */
-#define ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE       M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0026 )
-
-/**
- *    The Encoding is completed */
-#define M4PTO3GPP_WAR_END_OF_PROCESSING              M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0027 )
-
-/**
- *    The Encoding is completed */
-#define M4PTO3GPP_WAR_LAST_PICTURE                   M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0028 )
-
-/**
- *    The output audio padding parameter is undefined */
-#define ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING          M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0029 )
-
-/**
- * The video encoder encountered an Acces Unit error: very probably a file write error */
-#define ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR         M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x002A )
-
-#endif /* __M4PTO3GPP_ErrorCodes_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h b/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h
deleted file mode 100755
index 592e566..0000000
--- a/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4PTO3GPP_InternalTypes.h
- * @brief    Picture to 3gpp Service internal definitions
- * @note    This file contains all enum and types not visible to the external world.
- ******************************************************************************
- */
-
-
-#ifndef __M4PTO3GPP_INTERNALTYPES_H__
-#define __M4PTO3GPP_INTERNALTYPES_H__
-
-#define M4PTO3GPP_VERSION_MAJOR        3
-#define M4PTO3GPP_VERSION_MINOR        0
-#define M4PTO3GPP_VERSION_REVISION    6
-
-/**
- *    M4PTO3GPP public API and types */
-#include "M4PTO3GPP_API.h"
-#include "M4_Utils.h"
-
-/**
- *    Internally used modules */
-
-#include "M4WRITER_common.h"    /* Write 3GPP file    */
-#include "M4READER_Common.h"    /* Read AMR file    */
-#include "M4ENCODER_common.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- ******************************************************************************
- * enum            M4PTO3GPP_States
- * @brief        Main state machine of the M4PTO3GPP.
- ******************************************************************************
- */
-typedef enum
-{
-    M4PTO3GPP_kState_CREATED         = 0,    /**< M4PTO3GPP_Init has been called */
-    M4PTO3GPP_kState_OPENED          = 1,    /**< M4PTO3GPP_Open has been called */
-    M4PTO3GPP_kState_READY           = 2,    /**< Step can be called */
-    M4PTO3GPP_kState_FINISHED        = 3,    /**< Transcoding is finished */
-    M4PTO3GPP_kState_CLOSED          = 4     /**< Output file has been created */
-}
-M4PTO3GPP_States;
-
-/**
- ******************************************************************************
- * enum            M4PTO3GPP_StreamState
- * @brief        State of a media stream encoding (audio or video).
- ******************************************************************************
- */
-typedef enum
-{
-    M4PTO3GPP_kStreamState_NOSTREAM  = 0,    /**< No stream present */
-    M4PTO3GPP_kStreamState_STARTED   = 1,    /**< The stream encoding is in progress */
-    M4PTO3GPP_kStreamState_FINISHED  = 2    /**< The stream has finished encoding */
-}
-M4PTO3GPP_StreamState;
-
-/*
- * Definition of max AU size */
-#define M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO     0.8F    /**< Max AU size will be 0.8 times the
-                                                               YUV4:2:0 frame size */
-#define M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO    1.2F /**< Max chunk size will be 1.2 times
-                                                                  the max AU size */
-#define M4PTO3GPP_AUDIO_MAX_AU_SIZE              1000    /**< AAC max AU size seems to be
-                                                              about 850 bytes */
-#define M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE           5000
-
-/**
- ******************************************************************************
- * enum            anonymous enum
- * @brief        enum to keep track of the encoder state
- ******************************************************************************
- */
-enum
-{
-    M4PTO3GPP_kNoEncoder,
-    M4PTO3GPP_kEncoderClosed,
-    M4PTO3GPP_kEncoderStopped,
-    M4PTO3GPP_kEncoderRunning
-};
-
-/**
- ******************************************************************************
- * structure    M4PTO3GPP_InternalContext
- * @brief        This structure defines the M4PTO3GPP context (private)
- * @note        This structure is used for all M4PTO3GPP calls to store the context
- ******************************************************************************
- */
-typedef struct
-{
-    /**
-     *    M4PTO3GPP main variables */
-    M4PTO3GPP_States             m_State;            /**< M4PTO3GPP internal state */
-    M4PTO3GPP_Params             m_Params;           /**< M4PTO3GPP parameters, set by the user */
-    M4PTO3GPP_StreamState        m_VideoState;       /**< State of the video encoding */
-    M4PTO3GPP_StreamState        m_AudioState;       /**< State of the audio encoding */
-
-    /**
-     *    OSAL file read/write functions */
-    M4OSA_FileReadPointer*        pOsalFileRead;     /**< OSAL file read functions,
-                                                           to be provided by user */
-    M4OSA_FileWriterPointer*      pOsalFileWrite;    /**< OSAL file write functions,
-                                                          to be provided by user */
-
-    /**
-     *    Reader stuff */
-    M4_AccessUnit*                m_pReaderAudioAU;    /**< Read audio access unit */
-    M4_AudioStreamHandler*        m_pReaderAudioStream;/**< Description of the read audio stream */
-
-    /**
-     *    Writer stuff */
-    M4SYS_AccessUnit            m_WriterVideoAU;       /**< Written video access unit */
-    M4SYS_AccessUnit            m_WriterAudioAU;       /**< Written audio access unit */
-    M4ENCODER_Header*           m_pEncoderHeader;      /**< Sequence header returned by the
-                                                            encoder at encoder create (if any) */
-    M4SYS_StreamDescription*    m_pWriterVideoStream;  /**< Description of the written
-                                                             video stream */
-    M4SYS_StreamDescription*    m_pWriterAudioStream;  /**< Description of the written
-                                                             audio stream */
-    M4WRITER_StreamVideoInfos*  m_pWriterVideoStreamInfo;    /**< Video properties of the written
-                                                               video stream */
-    M4WRITER_StreamAudioInfos*    m_pWriterAudioStreamInfo;   /**< Audio properties of the written
-                                                               audio stream */
-
-    /**
-     *    Contexts of the used modules  */
-    M4OSA_Void*                    m_pAudioReaderContext; /**< Context of the audio reader module*/
-    M4OSA_Void*                    m_p3gpWriterContext;   /**< Context of the 3GP writer module */
-    M4OSA_Void*                    m_pMp4EncoderContext;  /**< Mp4 encoder context */
-    M4OSA_UInt32                   m_eEncoderState;
-
-    /**
-     * Reader Interfaces */
-    M4READER_GlobalInterface*    m_pReaderGlobInt;    /**< Reader common interface, global part */
-    M4READER_DataInterface*      m_pReaderDataInt;     /**< Reader common interface, data part */
-
-    /**
-     * Writer Interfaces */
-    M4WRITER_GlobalInterface*   m_pWriterGlobInt;     /**< Writer common interface, global part */
-    M4WRITER_DataInterface*     m_pWriterDataInt;     /**< Writer common interface, data part */
-
-    /**
-     * Encoder Interfaces */
-    M4ENCODER_GlobalInterface*  m_pEncoderInt;                /**< Encoder common interface */
-    M4OSA_Void*                 m_pEncoderExternalAPI;
-    M4OSA_Void*                 m_pEncoderUserData;
-
-    /**
-     * */
-    M4VIFI_ImagePlane*            pSavedPlane;
-    M4OSA_UInt32                  uiSavedDuration;
-
-    /**
-     *    Video rate control stuff */
-    M4_MediaTime                m_dLastVideoRegulCts; /**< Last time (CTS) the video bitrate
-                                                           regulation has been called */
-    M4_MediaTime                m_mtCts;         /**< Current video cts */
-    M4_MediaTime                m_mtNextCts;     /**< Next video CTS to transcode */
-    M4_MediaTime                m_mtAudioCts;    /**< Current audio cts */
-    M4_MediaTime                m_AudioOffSet;   /**< Audio Offset to add to the cts in loop mode*/
-    M4_MediaTime                m_PrevAudioCts;  /**< Previous audio cts for AAC looping */
-    M4_MediaTime                m_DeltaAudioCts; /**< Delta audio cts for AAC looping */
-    M4OSA_UInt32                m_CurrentFileSize; /**< Current Output file size  */
-    M4OSA_UInt32                m_MaxFileSize;     /**< Max Output file size  */
-    M4OSA_Bool                  m_IsLastPicture;   /**< A boolean that signals to the encoder that
-                                                       this is the last frame to be encoded*/
-    M4OSA_Bool                  m_bLastInternalCallBack;
-    M4OSA_UInt32                m_NbCurrentFrame;  /**< Index of the current YUV frame encoded */
-
-    /**
-     *    Audio padding mode */
-    M4OSA_Bool                    m_bAudioPaddingSilence;  /**< A boolean that signals that audio
-                                                                AU will be padded by silence */
-} M4PTO3GPP_InternalContext;
-
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
- *                                  M4VIFI_ImagePlane* pPlaneOut)
- * @brief    Call an external callback to get the picture to encode
- * @note    It is called by the video encoder
- * @param    pContext    (IN) VPP context, which actually is the M4PTO3GPP
- *                            internal context in our case
- * @param    pPlaneIn    (IN) Contains the image
- * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
- *                        output YUV420 image read with the m_pPictureCallbackFct
- * @return    M4NO_ERROR:    No error
- * @return    Any error returned by an underlaying module
- ******************************************************************************
- */
-M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
- M4VIFI_ImagePlane* pPlaneOut);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __M4PTO3GPP_INTERNALTYPES_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_API.h b/libvideoeditor/vss/inc/M4VSS3GPP_API.h
deleted file mode 100755
index 0bb7141..0000000
--- a/libvideoeditor/vss/inc/M4VSS3GPP_API.h
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4VSS3GPP_API_H__
-#define __M4VSS3GPP_API_H__
-
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_API.h
- * @brief   Video Studio Service 3GPP public API.
- * @note    VSS allows editing 3GPP files.
- *          It is a straightforward and fully synchronous API.
- ******************************************************************************
- */
-
-/**
- *  OSAL basic types and errors */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/**
- *  OSAL types for file access */
-#include "M4OSA_FileReader.h"
-#include "M4OSA_FileWriter.h"
-
-/**
- *  Definition of M4_VersionInfo */
-#include "M4TOOL_VersionInfo.h"
-
-/**
- * Image planes definition */
-#include "M4VIFI_FiltersAPI.h"
-
-/**
- * Common definitions of video editing components */
-#include "M4_VideoEditingCommon.h"
-#include "M4ENCODER_AudioCommon.h"
-#include "M4AD_Common.h"
-#include "M4DA_Types.h"
-
-/**
- * Extended API (xVSS) */
-#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
-#include "M4VSS3GPP_Extended_API.h"
-#endif
-
-//#include "M4VD_HW_API.h"
-//#include "M4VE_API.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-/**
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- *
- *      Edition Feature
- *
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- */
-
-/**
- *  Public type of the VSS edit context */
-typedef M4OSA_Void* M4VSS3GPP_EditContext;
-
-
-/**
- ******************************************************************************
- * enum     M4VSS3GPP_VideoEffectType
- * @brief   This enumeration defines the video effect types of the VSS3GPP
- ******************************************************************************
- */
-typedef enum
-{
-    M4VSS3GPP_kVideoEffectType_None           = 0,  /**< No video effect */
-    M4VSS3GPP_kVideoEffectType_FadeFromBlack  = 8,  /**< Intended for begin effect */
-    M4VSS3GPP_kVideoEffectType_FadeToBlack    = 16, /**< Intended for end effect */
-    M4VSS3GPP_kVideoEffectType_External       = 256 /**< External effect function is used */
-    /* reserved 256 + n */                          /**< External effect number n */
-
-} M4VSS3GPP_VideoEffectType;
-
-
-/**
- ******************************************************************************
- * enum     M4VSS3GPP_AudioEffectType
- * @brief   This enumeration defines the audio effect types of the VSS3GPP
- ******************************************************************************
- */
-typedef enum
-{
-    M4VSS3GPP_kAudioEffectType_None    = 0,
-    M4VSS3GPP_kAudioEffectType_FadeIn  = 8, /**< Intended for begin effect */
-    M4VSS3GPP_kAudioEffectType_FadeOut = 16 /**< Intended for end effect */
-
-} M4VSS3GPP_AudioEffectType;
-
-
-/**
- ******************************************************************************
- * enum     M4VSS3GPP_VideoTransitionType
- * @brief   This enumeration defines the video effect that can be applied during a transition.
- ******************************************************************************
- */
-typedef enum
-{
-    M4VSS3GPP_kVideoTransitionType_None      = 0,
-    M4VSS3GPP_kVideoTransitionType_CrossFade = 1,
-    M4VSS3GPP_kVideoTransitionType_External  = 256
-    /* reserved 256 + n */                          /**< External transition number n */
-
-} M4VSS3GPP_VideoTransitionType;
-
-
-/**
- ******************************************************************************
- * enum     M4VSS3GPP_AudioTransitionType
- * @brief   This enumeration defines the audio effect that can be applied during a transition.
- ******************************************************************************
- */
-typedef enum
-{
-    M4VSS3GPP_kAudioTransitionType_None = 0,
-    M4VSS3GPP_kAudioTransitionType_CrossFade
-
-} M4VSS3GPP_AudioTransitionType;
-
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_ExternalProgress
- * @brief   This structure contains information provided to the external Effect
- *          and Transition functions
- * @note    The uiProgress value should be enough for most cases
- ******************************************************************************
- */
-typedef struct
-{
-    /**< Progress of the Effect or the Transition, from 0 to 1000 (one thousand) */
-    M4OSA_UInt32    uiProgress;
-    /**< Index of the current clip (first clip in case of a Transition), from 0 to N */
-    //M4OSA_UInt8     uiCurrentClip;
-    /**< Current time, in milliseconds, in the current clip time-line */
-    M4OSA_UInt32    uiClipTime;
-    /**< Current time, in milliseconds, in the output clip time-line */
-    M4OSA_UInt32    uiOutputTime;
-    M4OSA_Bool        bIsLast;
-
-} M4VSS3GPP_ExternalProgress;
-
-
-/**
- ************************************************************************
- * enum     M4VSS3GPP_codecType
- * @brief    This enum defines the codec types used to create interfaces
- * @note    This enum is used internally by the VSS3GPP services to identify
- *             a currently supported codec interface. Each codec is
- *            registered with one of this type associated.
- *            When a codec instance is needed, this type is used to
- *            identify and retrieve its interface.
- *            This can be extended for other codecs.
- ************************************************************************
- */
-typedef enum
-{
-    /* Video Decoder Types */
-    M4VSS3GPP_kVideoDecMPEG4 = 0,
-    M4VSS3GPP_kVideoDecH264,
-
-    /* Video Encoder Types */
-    M4VSS3GPP_kVideoEncMPEG4,
-    M4VSS3GPP_kVideoEncH263,
-    M4VSS3GPP_kVideoEncH264,
-
-    /* Audio Decoder Types */
-    M4VSS3GPP_kAudioDecAMRNB,
-    M4VSS3GPP_kAudioDecAAC,
-    M4VSS3GPP_kAudioDecMP3,
-
-    /* Audio Encoder Types */
-    M4VSS3GPP_kAudioEncAMRNB,
-    M4VSS3GPP_kAudioEncAAC,
-
-    /* number of codecs, keep it as last enum entry, before invlaid type */
-    M4VSS3GPP_kCodecType_NB,
-    /* invalid codec type */
-    M4VSS3GPP_kCodecTypeInvalid = 255
-
-} M4VSS3GPP_codecType;
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_editVideoEffectFct
- * @brief       Begin and End video effect functions implemented by the integrator
- *              must match this prototype.
- * @note        The function is provided with the original image of the clip.
- *              It must apply the video effect to build the output image.
- *              The progress of the effect is given, on a scale from 0 to 1000.
- *              When the effect function is called, all the image plane structures
- *              and buffers are valid and owned by the VSS 3GPP.
- *
- * @param   pFunctionContext    (IN) The function context, previously set by the integrator
- * @param   pInputPlanes        (IN) Input YUV420 image: pointer to an array of three valid
-                                     image planes (Y, U and V)
- * @param   pOutputPlanes       (IN/OUT) Output (filtered) YUV420 image: pointer to an array
-                                         of three valid image planes (Y, U and V)
- * @param   pProgress           (IN) Set of information about the video transition progress.
- * @param   uiExternalEffectId  (IN) Which effect function should be used (for external effects)
- *
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-typedef M4OSA_ERR (*M4VSS3GPP_editVideoEffectFct)
-(
-    M4OSA_Void *pFunctionContext,
-    M4VIFI_ImagePlane *pInputPlanes,
-    M4VIFI_ImagePlane *pOutputPlanes,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiExternalEffectId
-);
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_editVideoTransitionFct
- * @brief       External transition functions implemented by the integrator
- *              must match this prototype.
- * @note        The function is provided with the image of the first clip and
- *              the image of the second clip. It must build the output image
- *              from the two input images.
- *              The progress of the transition is given, on a scale from 0 to 1000.
- *              When the external function is called, all the image plane
- *              structures and buffers are valid and owned by the VSS 3GPP.
- *
- * @param   pFunctionContext    (IN) The function context, previously set by the integrator
- * @param   pClip1InputPlanes   (IN) First input YUV420 image: pointer to an array of three
-                                     valid image planes (Y, U and V)
- * @param   pClip2InputPlanes   (IN) Second input YUV420 image: pointer to an array of three
-                                     valid image planes (Y, U and V)
- * @param   pOutputPlanes       (IN/OUT) Output (filtered) YUV420 image: pointer to an array
-                                         of three valid image planes (Y, U and V)
- * @param   pProgress           (IN) Set of information about the video effect progress.
- * @param   uiExternalTransitionId    (IN) Which transition function should be used
-                                            (for external transitions)
- *
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-typedef M4OSA_ERR (*M4VSS3GPP_editVideoTransitionFct)
-(
-    M4OSA_Void *pFunctionContext,
-    M4VIFI_ImagePlane *pClip1InputPlanes,
-    M4VIFI_ImagePlane *pClip2InputPlanes,
-    M4VIFI_ImagePlane *pOutputPlanes,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiExternalTransitionId
-);
-
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_EffectSettings
- * @brief   This structure defines an audio/video effect for the edition.
- * @note    Effect start time is relative to output clip.
- ******************************************************************************
- */
-typedef struct
-{
-    M4OSA_UInt32                 uiStartTime;           /**< In ms */
-    M4OSA_UInt32                 uiDuration;            /**< In ms */
-    M4VSS3GPP_VideoEffectType    VideoEffectType;       /**< None, FadeIn, FadeOut, etc. */
-    M4VSS3GPP_editVideoEffectFct ExtVideoEffectFct;     /**< External effect function */
-    M4OSA_Void                  *pExtVideoEffectFctCtxt;/**< Context given to the external
-                                                             effect function */
-    M4VSS3GPP_AudioEffectType    AudioEffectType;       /**< None, FadeIn, FadeOut */
-
-#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
-    M4xVSS_EffectSettings         xVSS;
-#endif
-
-} M4VSS3GPP_EffectSettings;
-
-
-/**
- ******************************************************************************
- * enum        M4VSS3GPP_TransitionBehaviour
- * @brief    Transition behavior
- ******************************************************************************
- */
-typedef enum
-{
-    M4VSS3GPP_TransitionBehaviour_SpeedUp = 0,
-    M4VSS3GPP_TransitionBehaviour_Linear,
-    M4VSS3GPP_TransitionBehaviour_SpeedDown,
-    M4VSS3GPP_TransitionBehaviour_SlowMiddle,
-    M4VSS3GPP_TransitionBehaviour_FastMiddle
-} M4VSS3GPP_TransitionBehaviour;
-
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_TransitionSettings
- * @brief   This structure defines the transition to be applied when assembling two clips.
- ******************************************************************************
- */
-typedef struct
-{
-    /**< Duration of the transition, in milliseconds (set to 0 to get no transition) */
-    M4OSA_UInt32                     uiTransitionDuration;
-
-    /**< Type of the video transition */
-    M4VSS3GPP_VideoTransitionType    VideoTransitionType;
-
-    /**< External transition video effect function */
-    M4VSS3GPP_editVideoTransitionFct ExtVideoTransitionFct;
-
-    /**< Context of the external transition video effect function */
-    M4OSA_Void                      *pExtVideoTransitionFctCtxt;
-    M4VSS3GPP_AudioTransitionType    AudioTransitionType;   /**< Type of the audio transition */
-    M4VSS3GPP_TransitionBehaviour     TransitionBehaviour;    /**<Transition behaviour*/
-
-#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
-    M4xVSS_TransitionSettings        xVSS;
-#endif
-
-} M4VSS3GPP_TransitionSettings;
-
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_ClipSettings
- * @brief   This structure defines an input clip for the edition.
- * @note    It also contains the settings for the cut and begin/end effects applied to the clip.
- ******************************************************************************
- */
-typedef struct
-{
-    M4OSA_Void                     *pFile;            /**< Clip file descriptor */
-    M4VIDEOEDITING_FileType         FileType;         /**< .3gp, .amr, .mp3     */
-    M4OSA_UInt32                    filePathSize;      /**< Clip path size
-                                                           (add because of UTF16 conversion)*/
-    M4VIDEOEDITING_ClipProperties   ClipProperties;   /**< Clip analysis previously computed
-                                                       with M4VSS3GPP_editAnalyseClip */
-    M4OSA_UInt32                    uiBeginCutTime;   /**< Begin cut time, in milliseconds */
-    M4OSA_UInt32                    uiEndCutTime;     /**< End cut time, in milliseconds */
-    M4OSA_Bool                      bTranscodingRequired;
-
-#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
-    M4xVSS_ClipSettings             xVSS;
-#endif
-
-} M4VSS3GPP_ClipSettings;
-
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_EditSettings
- * @brief   This structure gathers all the information needed to define a complete
- *          edition operation
- ******************************************************************************
- */
-typedef struct
-{
-      /**< Number of element of the clip list pClipList */
-    M4OSA_UInt8                      uiClipNumber;
-    /**< The properties of this clip will be used as a reference for compatibility checking */
-    M4OSA_UInt8                      uiMasterClip;
-    /**< List of the input clips settings. Pointer to an array of uiClipNumber
-     clip settings pointers */
-    M4VSS3GPP_ClipSettings           **pClipList;
-    /**< List of the transition settings. Pointer to an array of uiClipNumber-1
-     transition settings pointers */
-    M4VSS3GPP_TransitionSettings     **pTransitionList;
-    M4VSS3GPP_EffectSettings         *Effects;         /**< List of effects */
-    M4OSA_UInt8                         nbEffects;     /**< Number of effects in the above list */
-    /**< Frame rate at which the modified video sections will be encoded */
-    M4VIDEOEDITING_VideoFramerate    videoFrameRate;
-    M4OSA_Void                       *pOutputFile;      /**< Output 3GPP clip file descriptor */
-    M4OSA_UInt32                     uiOutputPathSize;    /**< Output file path size*/
-    /**< Temporary file to store metadata ("moov.bin") */
-    M4OSA_Void                       *pTemporaryFile;
-
-#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
-    M4xVSS_EditSettings              xVSS;
-#endif
-    M4OSA_Float                    PTVolLevel;
-} M4VSS3GPP_EditSettings;
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
- * @brief   This function allows checking if a clip is compatible with VSS 3GPP editing
- * @note    It also fills a ClipAnalysis structure, which can be used to check if two
- *          clips are compatible
- * @param   pClip               (IN) File descriptor of the input 3GPP/MP3 clip file.
- * @param   pClipProperties     (IN) Pointer to a valid ClipProperties structure.
- * @param   FileType            (IN) Type of the input file (.3gp, .amr, .mp3)
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return   M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
- * @return   M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
- * @return   M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
- * @return   M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
- * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
- * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editAnalyseClip(M4OSA_Void *pClip, M4VIDEOEDITING_FileType FileType,
-                                    M4VIDEOEDITING_ClipProperties  *pClipProperties,
-                                    M4OSA_FileReadPointer *pFileReadPtrFct);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
- * @brief   This function allows checking if two clips are compatible with each other
- *          for VSS 3GPP editing assembly feature.
- * @note
- * @param   pClip1Properties        (IN) Clip analysis of the first clip
- * @param   pClip2Properties        (IN) Clip analysis of the second clip
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
- * @return  M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_PLATFORM
- * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
- * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
- * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
- * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
- * @return  M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
- * @return  M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility(M4VIDEOEDITING_ClipProperties  *pClip1Properties,
-                                               M4VIDEOEDITING_ClipProperties  *pClip2Properties);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editInit()
- * @brief    Initializes the VSS 3GPP edit operation (allocates an execution context).
- * @note
- * @param    pContext            (OUT) Pointer on the VSS 3GPP edit context to allocate
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editInit(
-    M4VSS3GPP_EditContext* pContext,
-    M4OSA_FileReadPointer* pFileReadPtrFct,
-    M4OSA_FileWriterPointer* pFileWritePtrFct );
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
- * @brief    Allows filling a clip settings structure with default values
- *
- * @note    WARNING: pClipSettings->pFile      will be allocated in this function.
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   pFile               (IN) Clip file name
- * @param   filePathSize        (IN) Size of the clip path (needed for UTF16 conversion)
- * @param    nbEffects           (IN) Nb of effect settings to allocate
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editCreateClipSettings(M4VSS3GPP_ClipSettings *pClipSettings,
-                                           M4OSA_Void* pFile, M4OSA_UInt32 filePathSize,
-                                           M4OSA_UInt8 nbEffects);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
- * @brief    Duplicates a clip settings structure, performing allocations if required
- *
- * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects (deprecated)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings(M4VSS3GPP_ClipSettings *pClipSettingsDest,
-                                              M4VSS3GPP_ClipSettings *pClipSettingsOrig,
-                                              M4OSA_Bool bCopyEffects);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
- * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editFreeClipSettings(M4VSS3GPP_ClipSettings *pClipSettings);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editOpen()
- * @brief   Set the VSS 3GPP input and output files, and set the settings.
- * @note
- * @param   pContext            (IN) VSS 3GPP edit context
- * @param   pSettings           (IN) Edit settings
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        VSS is not in an appropriate state for this function to be called
- * @return  M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editOpen(M4VSS3GPP_EditContext pContext, M4VSS3GPP_EditSettings *pSettings);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editStep()
- * @brief   Perform one step of editing.
- * @note
- * @param   pContext                (IN) VSS 3GPP edit context
- * @param   pProgress               (OUT) Progress percentage (0 to 100) of the editing operation
- * @return  M4NO_ERROR:             No error
- * @return  M4ERR_PARAMETER:        pContext is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:            VSS 3GPP is not in an appropriate state for this function to
- *                                  be called
- * @return  M4VSS3GPP_WAR_EDITING_DONE:Edition is done, user should now call M4VSS3GPP_editClose()
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editStep(M4VSS3GPP_EditContext pContext, M4OSA_UInt8 *pProgress);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editClose()
- * @brief   Finish the VSS 3GPP edit operation.
- * @note    The output 3GPP file is ready to be played after this call
- * @param   pContext            (IN) VSS 3GPP edit context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        VSS 3GPP is not in an appropriate state for this function
- *                              to be called
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editClose(M4VSS3GPP_EditContext pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editCleanUp()
- * @brief   Free all resources used by the VSS 3GPP edit operation.
- * @note    The context is no more valid after this call
- * @param   pContext            (IN) VSS 3GPP edit context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editCleanUp(M4VSS3GPP_EditContext pContext);
-
-/**
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- *
- *      Audio Mixing Feature
- *
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- */
-/**
- *  Public type of the VSS audio mixing context */
-typedef M4OSA_Void* M4VSS3GPP_AudioMixingContext;
-
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_AudioMixingSettings
- * @brief   This structure defines the settings of the audio mixing operation.
- ******************************************************************************
- */
-typedef struct {
-    M4OSA_Void*                             pOriginalClipFile;      /**< Input 3GPP clip file */
-    M4OSA_Void*                             pAddedAudioTrackFile;   /**< New audio track */
-    M4VIDEOEDITING_FileType                 AddedAudioFileType;     /**< File Format of the new audio file */
-    M4OSA_UInt32                            uiAddCts;               /**< Time, in milliseconds,
-                                                                    at which the added audio track is inserted */
-    M4OSA_UInt32                            uiAddVolume;            /**< Volume, in percentage,
-                                                                        of the added audio track */
-    M4OSA_UInt32                            uiBeginLoop;            /**< Describes in milli-second the
-                                                                        start time of the loop */
-    M4OSA_UInt32                            uiEndLoop;              /**< Describes in milli-second the end
-                                                                    time of the loop (0 means no loop) */
-    M4OSA_Bool                              bRemoveOriginal;      /**< If true, the original audio track
-                                                                     is not taken into account */
-    M4OSA_Void*                             pOutputClipFile;      /**< Output 3GPP clip file */
-    M4OSA_Void*                             pTemporaryFile;       /**< Temporary file to store metadata
-                                                     ("moov.bin") */
-    /**< The following parameters are optionnal. They are just used in case of MP3 replacement. */
-    M4VIDEOEDITING_AudioSamplingFrequency   outputASF;         /**< Output sampling frequency */
-    M4VIDEOEDITING_AudioFormat              outputAudioFormat; /**< Output audio codec(AAC/AMR)*/
-    M4VIDEOEDITING_Bitrate                  outputAudioBitrate; /**< Output audio bitrate */
-    M4OSA_UInt8                             outputNBChannels; /**< Output audio nb of channels */
-    M4OSA_Bool                              b_DuckingNeedeed;
-    M4OSA_Int32                             InDucking_threshold;
-    M4OSA_Float                             fBTVolLevel;
-    M4OSA_Float                             fPTVolLevel;
-    M4OSA_Float                             InDucking_lowVolume;
-    M4OSA_Bool                              bLoop;
-    M4OSA_UInt32                            uiSamplingFrequency;
-    M4OSA_UInt32                            uiNumChannels;
-} M4VSS3GPP_AudioMixingSettings;
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
- *                                     M4VSS3GPP_AudioMixingSettings* pSettings)
- * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
- * @note
- * @param    pContext        (OUT) Pointer on the VSS audio mixing context to allocate
- * @param    pSettings        (IN) Pointer to valid audio mixing settings
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return    M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_audioMixingInit(
-    M4VSS3GPP_AudioMixingContext* pContext,
-    M4VSS3GPP_AudioMixingSettings* pSettings,
-    M4OSA_FileReadPointer* pFileReadPtrFct,
-    M4OSA_FileWriterPointer* pFileWritePtrFct );
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_audioMixingStep()
- * @brief   Perform one step of audio mixing.
- * @note
- * @param   pContext                        (IN) VSS 3GPP audio mixing context
- * @return  M4NO_ERROR:                     No error
- * @return  M4ERR_PARAMETER:                pContext is M4OSA_NULL (debug only)
- * @param   pProgress                       (OUT) Progress percentage (0 to 100)
-                                                  of the finalization operation
- * @return  M4ERR_STATE:                    VSS is not in an appropriate state for
-                                            this function to be called
- * @return  M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should
-                                               now call M4VSS3GPP_audioMixingCleanUp()
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext,
-                                     M4OSA_UInt8 *pProgress);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp()
- * @brief   Free all resources used by the VSS audio mixing operation.
- * @note    The context is no more valid after this call
- * @param   pContext            (IN) VSS 3GPP audio mixing context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext);
-
-
-/**
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- *
- *      Extract Picture Feature
- *
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- */
-/**
- *  Public type of the VSS extract picture context */
-typedef M4OSA_Void* M4VSS3GPP_ExtractPictureContext;
-
-/**
- ******************************************************************************
- * struct   M4VSS3GPP_ExtractPictureSettings
- * @brief   This structure defines the settings of the extract picture audio operation.
- ******************************************************************************
- */
-typedef struct {
-    M4OSA_Void*                         pInputClipFile;  /**< Input 3GPP clip file */
-    M4OSA_Int32                         iExtractionTime; /**< frame time (in ms) to be extracted */
-    M4OSA_Void*                         pOutputYuvPic;   /**< Output YUV picture name */
-} M4VSS3GPP_ExtractPictureSettings;
-
-
-/******************************************************************************
- * M4OSA_ERR M4VSS3GPP_extractPictureInit()
- * @brief    Initializes the VSS extract picture operation (allocates an execution context).
- * @note
- * @param    pContext            (OUT) Pointer on the VSS extract picture context to allocate
- * @param    pSettings            (IN) Pointer to valid extract picture settings
- * @param    pWidth                (OUT) video stream width
- * @param    pHeight                (OUT) video stream height
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @return    M4NO_ERROR:                        No error
- * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL (debug only)
- * @return    M4ERR_ALLOC:                    There is no more available memory
- * @return    M4VSS3GPP_ERR_INVALID_CLIP1:    The input clip is empty
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_extractPictureInit(
-        M4VSS3GPP_ExtractPictureContext* pContext,
-        M4VSS3GPP_ExtractPictureSettings* pSettings,
-        M4OSA_UInt32 *pWidth,
-        M4OSA_UInt32 *pHeight,
-        M4OSA_FileReadPointer* pFileReadPtrFct );
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_extractPictureStep()
- * @brief   Perform one step of picture extraction.
- * @note
- * @param   pContext                        (IN) VSS extract picture context
- * @return  M4NO_ERROR:                     No error
- * @return  M4ERR_PARAMETER:                pContext is M4OSA_NULL (debug only)
- * @param   pDecPlanes                      (OUT) Plane in wich the extracted picture is copied
- * @param   pProgress                       (OUT) Progress percentage (0 to 100)
-                                                 of the picture extraction
- * @return  M4ERR_STATE:                    VSS is not in an appropriate state for this
-                                            function to be called
- * @return  VSS_WAR_END_OF_EXTRACT_PICTURE: Picture extraction  is over, user should now
-                                            call M4VSS3GPP_extractPictureCleanUp()
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_extractPictureStep(M4VSS3GPP_ExtractPictureContext pContext,
-                                       M4VIFI_ImagePlane *pDecPlanes, M4OSA_UInt8 *pProgress);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_extractPictureCleanUp()
- * @brief   Free all resources used by the VSS picture extraction.
- * @note    The context is no more valid after this call
- * @param   pContext            (IN) VSS extract picture context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_extractPictureCleanUp(M4VSS3GPP_ExtractPictureContext pContext);
-
-/**
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- *
- *      Common features
- *
- ******************************************************************************
- ******************************************************************************
- ******************************************************************************
- */
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_GetVersion()
- * @brief   Get the VSS version.
- * @note    Can be called anytime. Do not need any context.
- * @param   pVersionInfo        (OUT) Pointer to a version info structure
- * @return  M4NO_ERROR:         No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
-
-
-#ifdef WIN32
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
- * @brief   Return a string describing the given error code
- * @note    The input string must be already allocated (and long enough!)
- * @param   err             (IN) Error code to get the description from
- * @param   sMessage        (IN/OUT) Allocated string in which the description will be copied
- * @return  M4NO_ERROR:     Input error is from the VSS3GPP module
- * @return  M4ERR_PARAMETER:Input error is not from the VSS3GPP module
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_GetErrorMessage(M4OSA_ERR err, M4OSA_Char* sMessage);
-#endif /**< WIN32 */
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __M4VSS3GPP_API_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
deleted file mode 100755
index 7c500da..0000000
--- a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_ErrorCodes.h
- * @brief    Video Studio Service 3GPP error definitions.
- * @note
- ******************************************************************************
- */
-
-#ifndef __M4VSS3GPP_ErrorCodes_H__
-#define __M4VSS3GPP_ErrorCodes_H__
-
-/**
- *    OSAL basic types and errors */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/**
- *    OSAL core ID definitions */
-#include "M4OSA_CoreID.h"
-
-
-/************************************************************************/
-/* Warning codes                                                        */
-/************************************************************************/
-
-/**
- *    End of edition, user should now call M4VSS3GPP_editClose() */
-#define M4VSS3GPP_WAR_EDITING_DONE             M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0001)
-
-/**
- *    End of audio mixing, user should now call M4VSS3GPP_audioMixingCleanUp() */
-#define M4VSS3GPP_WAR_END_OF_AUDIO_MIXING      M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0010)
-
-/**
- *    End of extract picture, user should now call M4VSS3GPP_extractPictureCleanUp() */
-#define M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE   M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0020)
-/* RC: to know when a file has been processed */
-#define M4VSS3GPP_WAR_SWITCH_CLIP              M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
-
-/************************************************************************/
-/* Error codes                                                          */
-/************************************************************************/
-
-/**
- * Invalid file type */
-#define M4VSS3GPP_ERR_INVALID_FILE_TYPE               M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0001)
-/**
- * Invalid effect kind */
-#define M4VSS3GPP_ERR_INVALID_EFFECT_KIND             M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0002)
-/**
- * Invalid effect type for video */
-#define M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0003)
-/**
- * Invalid effect type for audio */
-#define M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0004)
-/**
- * Invalid transition type for video */
-#define M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0005)
-/**
- * Invalid transition type for audio */
-#define M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0006)
-/**
- * Invalid video encoding frame rate */
-#define M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE        \
-                                      M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0007)
- /**
- * External effect function is used without being set */
-#define M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL            M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0008)
-/**
- * External transition function is used without being set */
-#define M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0009)
-
-/**
- * Begin cut time is larger than the clip duration */
-#define M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION  M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0010)
-/**
- * Begin cut time is larger or equal than end cut */
-#define M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0011)
-/**
- * Two consecutive transitions are overlapping on one clip */
-#define M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0012)
-
-/**
- * An input 3GPP file is invalid/corrupted */
-#define M4VSS3GPP_ERR_INVALID_3GPP_FILE               M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0016)
-/**
- * A file contains an unsupported video format */
-#define M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT  M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0017)
-/**
- * A file contains an unsupported audio format */
-#define M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT  M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0018)
-
-/**
- * A file format is not supported by the VSS */
-#define M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0019)
- /**
- *    An input clip has an unexpectedly large Video AU */
-#define M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001A)
-/**
- *    An input clip has an unexpectedly large Audio AU */
-#define M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001B)
-/**
- *    An input clip has a corrupted Audio AMR AU */
-#define M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001C)
-/**
- * The video encoder encountered an Acces Unit error: very probably a file write error */
-#define M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001D)
-
-
-/************************************************************************/
-/* Errors returned by M4VSS3GPP_editAnalyseClip()                       */
-/************************************************************************/
-
-/**
- * Unsupported video format for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0020)
-/**
- * Unsupported H263 profile for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0021)
-/**
- * Unsupported MPEG-4 profile for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE    \
-                                             M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0022)
-/**
- * Unsupported MPEG-4 RVLC tool for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0023)
-/**
- * Unsupported audio format for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0024)
- /**
- * File contains no supported stream */
-#define M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE    M4OSA_ERR_CREATE( M4_ERR,\
-                                                                            M4VSS3GPP, 0x0025)
-/**
- * File contains no video stream or an unsupported video stream */
-#define M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE    M4OSA_ERR_CREATE( M4_ERR,\
-                                                                                M4VSS3GPP, 0x0026)
-/**
- * Unsupported video profile for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_PROFILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0027)
-
-/**
- * Unsupported video profile for Video Editing */
-#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_LEVEL M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0028)
-
-/************************************************************************/
-/* Errors returned by M4VSS3GPP_editCheckClipCompatibility()            */
-/************************************************************************/
-
-/**
- * At least one of the clip analysis has been generated by another version of the VSS 3GPP */
-#define M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0030)
-/**
- * Clips don't have the same video format (H263 or MPEG4) */
-#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0031)
-/**
- *    Clips don't have the same frame size */
-#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0032)
-/**
- *    Clips don't have the same MPEG-4 time scale */
-#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0033)
-/**
- *    Clips don't have the same use of MPEG-4 data partitioning */
-#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING    M4OSA_ERR_CREATE( M4_ERR,\
-                                                                              M4VSS3GPP, 0x0034)
-/**
- *    MP3 clips can't be assembled */
-#define M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0035)
-/**
- *  Clips don't have the same audio stream type (ex: AMR != AAC) */
-#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE  M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0036)
-/**
- *  Clips don't have the same audio number of channels (ex: stereo != mono) */
-#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS        M4OSA_ERR_CREATE( M4_WAR,\
-                                                                            M4VSS3GPP, 0x0037)
-/**
- *  Clips don't have the same sampling frequency (ex: 44100Hz != 16000Hz) */
-#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY    M4OSA_ERR_CREATE( M4_WAR,\
-                                                                              M4VSS3GPP, 0x0038)
-
-/************************************************************************/
-/* Audio mixing error codes                                            */
-/************************************************************************/
-
-/**
- * The input 3GPP file does not contain any supported audio or video track */
-#define M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE     M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0050)
-/**
- * The Volume of the added audio track (AddVolume) must be strictly superior than zero */
-#define M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO           M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0051)
-/**
- * The time at which the audio track is added (AddCts) can't be superior than the
-   input video track duration */
-#define M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION        M4OSA_ERR_CREATE( M4_ERR,\
-                                                                            M4VSS3GPP, 0x0052)
-/**
- * The audio track file format setting is undefined */
-#define M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT        M4OSA_ERR_CREATE( M4_ERR,\
-                                                                            M4VSS3GPP, 0x0053)
-/**
- * The added audio track stream has an unsupported format */
-#define M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0054)
-/**
- * The audio mixing feature doesn't support EVRC, MP3 audio tracks */
-#define M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0055)
-/**
- * An added audio track limit the available features: uiAddCts must be 0
-   and bRemoveOriginal must be M4OSA_TRUE */
-#define M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK  M4OSA_ERR_CREATE( M4_ERR,\
-                                                                              M4VSS3GPP, 0x0056)
-/**
- * Input audio track is not AMR-NB nor AAC so it can't be mixed with output */
-#define M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED                    M4OSA_ERR_CREATE( M4_ERR,\
-                                                                              M4VSS3GPP, 0x0057)
-/**
- * Input clip must be a 3gpp file */
-#define M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP              M4OSA_ERR_CREATE( M4_ERR,\
-                                                                              M4VSS3GPP, 0x0058)
-/**
- * Begin loop time is higher than end loop time or higher than added clip duration */
-#define M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP              M4OSA_ERR_CREATE( M4_ERR,\
-                                                                              M4VSS3GPP, 0x0059)
-
-
-/************************************************************************/
-/* Audio mixing and extract picture error code                          */
-/************************************************************************/
-
-/**
- * H263 Profile 3 level 10 is not supported */
-#define M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED            M4OSA_ERR_CREATE( M4_ERR,\
-                                                                            M4VSS3GPP, 0x0060)
-/**
- * File contains no video stream or an unsupported video stream */
-#define M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE        M4OSA_ERR_CREATE( M4_ERR,\
-                                                                            M4VSS3GPP, 0x0061)
-
-
-/************************************************************************/
-/* Internal error and warning codes                                     */
-/************************************************************************/
-
-/**
- * Internal state error */
-#define M4VSS3GPP_ERR_INTERNAL_STATE                 M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0100)
-/**
- * Luminance filter effect error */
-#define M4VSS3GPP_ERR_LUMA_FILTER_ERROR              M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0104)
-/**
- * Transition filter effect error */
-#define M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0106)
-/**
- * The audio decoder initialization failed */
-#define M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED      M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0110)
-/**
- * The decoder produced an unattended amount of PCM */
-#define M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0115)
-/**
- * Output file must be 3GPP or MP3 */
-#define M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0117)
-/**
- * Can not find a valid video frame */
-#define M4VSS3GPP_ERR_NO_VALID_VID_FRAME         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0118)
-
-#endif /* __M4VSS3GPP_ErrorCodes_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h b/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h
deleted file mode 100755
index 9668b67..0000000
--- a/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4VSS3GPP_EXTENDED_API_H__
-#define __M4VSS3GPP_EXTENDED_API_H__
-
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_Extended_API.h
- * @brief    API of xVSS
- * @note
- ******************************************************************************
-*/
-
-#ifndef M4VSS_SUPPORT_EXTENDED_FEATURES
-#error "*** the flag M4VSS_SUPPORT_EXTENDED_FEATURES should be activated in CompilerSwitches\
-             for VideoStudio ***"
-#endif
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_getTextRgbBufferFct
- * @brief        External text to RGB buffer functions implemented by the integrator
- *                must match this prototype.
- * @note        The function is provided with the renderingData, the text buffer and
- *                its size. It must build the output RGB image plane containing the text.
- *
- * @param   pRenderingData    (IN) The data given by the user in M4xVSS_EffectSettings
- * @param    pTextBuffer        (IN) Text buffer given by the user in M4xVSS_EffectSettings
- * @param    textBufferSize    (IN) Text buffer size given by the user in M4xVSS_EffectSettings
- * @param    pOutputPlane    (IN/OUT) Output RGB565 image
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- ******************************************************************************
-*/
-typedef M4OSA_ERR (*M4xVSS_getTextRgbBufferFct)
-(
-    M4OSA_Void *pRenderingData,
-    M4OSA_Void *pTextBuffer,
-    M4OSA_UInt32 textBufferSize,
-    M4VIFI_ImagePlane **pOutputPlane
-);
-
-/**
- ******************************************************************************
- * struct    M4xVSS_BGMSettings
- * @brief    This structure gathers all the information needed to add Background music to 3gp file
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Void                  *pFile;         /**< Input file path */
-    M4VIDEOEDITING_FileType     FileType;       /**< .3gp, .amr, .mp3     */
-    M4OSA_UInt32                uiAddCts;       /**< Time, in milliseconds, at which the added
-                                                      audio track is inserted */
-    M4OSA_UInt32                uiAddVolume;     /**< Volume, in percentage, of the added audio track */
-    M4OSA_UInt32                uiBeginLoop;    /**< Describes in milli-second the start time
-                                                     of the loop */
-    M4OSA_UInt32                uiEndLoop;      /**< Describes in milli-second the end time of the
-                                                     loop (0 means no loop) */
-    M4OSA_Bool                  b_DuckingNeedeed;
-    M4OSA_Int32                 InDucking_threshold;  /**< Threshold value at which background
-                                                            music shall duck */
-    M4OSA_Float                 lowVolume;       /**< lower the background track to this factor
-                                                 and increase the primary track to inverse of this factor */
-    M4OSA_Bool                  bLoop;
-    M4OSA_UInt32                uiSamplingFrequency;
-    M4OSA_UInt32                uiNumChannels;
-} M4xVSS_BGMSettings;
-
-
-/**
- ******************************************************************************
- * enum     M4VSS3GPP_VideoEffectType
- * @brief   This enumeration defines the video effect types of the VSS3GPP
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VSS3GPP_kRGB888           = 0,  /**< RGB888 data type */
-    M4VSS3GPP_kRGB565           = 1  /**< RGB565 data type */
-
-} M4VSS3GPP_RGBType;
-
-/**
- ******************************************************************************
- * struct   M4xVSS_EffectSettings
- * @brief   This structure defines an audio/video effect for the edition.
- ******************************************************************************
-*/
-typedef struct
-{
-    /**< In percent of the cut clip duration */
-    M4OSA_UInt32               uiStartPercent;
-    /**< In percent of the ((clip duration) - (effect starttime)) */
-    M4OSA_UInt32               uiDurationPercent;
-    /**< Framing file path (GIF/PNG file), used only if VideoEffectType == framing */
-    M4OSA_Void                 *pFramingFilePath;
-    /**< Framing RGB565 buffer,  used only if VideoEffectType == framing */
-    M4VIFI_ImagePlane          *pFramingBuffer;
-    /**<RGB Buffer type,used only if VideoEffectType == framing */
-    M4VSS3GPP_RGBType          rgbType;
-    /**< The top-left X coordinate in the output picture where the added frame will be displayed.
-     Used only if VideoEffectType == framing || VideoEffectType == text */
-    M4OSA_UInt32               topleft_x;
-    /**< The top-left Y coordinate in the output picture where the added frame will be displayed.
-     Used only if VideoEffectType == framing || VideoEffectType == text */
-    M4OSA_UInt32               topleft_y;
-    /**< Does framing image is resized to output video size.
-     Used only if VideoEffectType == framing */
-    M4OSA_Bool                 bResize;
-    M4VIDEOEDITING_VideoFrameSize framingScaledSize;
-/**< Size to which the the framing file needs to be resized */
-    /**< Text buffer. Used only if VideoEffectType == text */
-    M4OSA_Void*                pTextBuffer;
-    /**< Text buffer size. Used only if VideoEffectType == text */
-    M4OSA_UInt32               textBufferSize;
-    /**< Pointer containing specific data used by the font engine (size, color...) */
-    M4OSA_Void*                pRenderingData;
-    /**< Text plane width. Used only if VideoEffectType == text */
-    M4OSA_UInt32               uiTextBufferWidth;
-    /**< Text plane height. Used only if VideoEffectType == text */
-    M4OSA_UInt32               uiTextBufferHeight;
-    /**< Processing rate of the effect added when using the Fifties effect */
-    M4OSA_UInt32               uiFiftiesOutFrameRate;
-    /**< RGB16 input color of the effect added when using the rgb16 color effect */
-    M4OSA_UInt16               uiRgb16InputColor;
-
-    M4OSA_UInt8                uialphaBlendingStart;       /*Start percentage of Alpha blending*/
-    M4OSA_UInt8                uialphaBlendingMiddle;      /*Middle percentage of Alpha blending*/
-    M4OSA_UInt8                uialphaBlendingEnd;         /*End percentage of Alpha blending*/
-    M4OSA_UInt8                uialphaBlendingFadeInTime;  /*Duration, in percentage of
-                                                            effect duration, of the FadeIn phase*/
-    M4OSA_UInt8                uialphaBlendingFadeOutTime;   /*Duration, in percentage of effect
-                                                                duration, of the FadeOut phase*/
-    M4OSA_UInt32                width;   /*width of the ARGB8888 clip .
-                                            Used only if video effect is framming */
-    M4OSA_UInt32                height; /*height of the ARGB8888 clip .
-                                            Used only if video effect is framming */
-} M4xVSS_EffectSettings;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_AlphaMagicSettings
- * @brief    This structure defines the alpha magic transition settings
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Void*            pAlphaFilePath;        /**< Alpha file path (JPG file)  */
-    M4OSA_Int32            blendingPercent;    /**< Blending Percentage between 0 and 100 */
-    M4OSA_Bool             isreverse;            /**< direct effect or reverse */
-    /*To support ARGB8888 : get the width and height */
-    M4OSA_UInt32            width;
-    M4OSA_UInt32            height;
-} M4xVSS_AlphaMagicSettings;
-
-/**
- ******************************************************************************
- * enum        M4xVSS_SlideTransition_Direction
- * @brief    Defines directions for the slide transition
- ******************************************************************************
-*/
-
-typedef enum {
-    M4xVSS_SlideTransition_RightOutLeftIn,
-    M4xVSS_SlideTransition_LeftOutRightIn,
-    M4xVSS_SlideTransition_TopOutBottomIn,
-    M4xVSS_SlideTransition_BottomOutTopIn
-} M4xVSS_SlideTransition_Direction;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_AlphaMagicSettings
- * @brief    This structure defines the slide transition settings
- ******************************************************************************
-*/
-
-typedef struct
-{
-    M4xVSS_SlideTransition_Direction direction; /* direction of the slide */
-} M4xVSS_SlideTransitionSettings;
-
-/**
- ******************************************************************************
- * struct   M4xVSS_TransitionSettings
- * @brief   This structure defines additional transition settings specific to
- *            xVSS, which are appended to the VSS3GPP transition settings
- *            structure.
- ******************************************************************************
-*/
-typedef struct
-{
-    /* Anything xVSS-specific, but common to all transitions, would go here,
-    before the union. */
-    union {
-        /**< AlphaMagic settings, used only if VideoTransitionType ==
-            M4xVSS_kVideoTransitionType_AlphaMagic */
-        M4xVSS_AlphaMagicSettings        *pAlphaMagicSettings;
-        /* only in case of slide transition. */
-        M4xVSS_SlideTransitionSettings    *pSlideTransitionSettings;
-    } transitionSpecific;
-} M4xVSS_TransitionSettings;
-
-
-/**
- ******************************************************************************
- * enum        M4xVSS_MediaRendering
- * @brief    This enum defines different media rendering using exif orientation
- ******************************************************************************
-*/
-typedef enum
-{
-    M4xVSS_kResizing = 0,        /*The picture is resized, the aspect ratio can be different
-                                    from the original one. All of the picture is rendered*/
-    M4xVSS_kCropping,            /*The picture is cropped, the aspect ratio is the same as
-                                    the original one. The picture is not rendered entirely*/
-    M4xVSS_kBlackBorders        /*Black borders are rendered in order to keep the original
-                                    aspect ratio. All the picture is rendered*/
-
-} M4xVSS_MediaRendering;
-
-
-/**
- ******************************************************************************
- * struct   M4xVSS_ClipSettings
- * @brief   This structure defines an input clip for the edition.
- * @note    It also contains the settings for the cut and begin/end effects applied to the clip.
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt32                    uiBeginCutPercent;    /**< Begin cut time, in percent of clip
-                                                                duration (only for 3GPP clip !) */
-    M4OSA_UInt32                    uiEndCutPercent;    /**< End cut time, in percent of clip
-                                                             duration (only for 3GPP clip !) */
-    M4OSA_UInt32                    uiDuration;            /**< Duration of the clip, if different
-                                                                from 0, has priority on
-                                                                uiEndCutTime or uiEndCutPercent */
-    M4OSA_Bool                        isPanZoom;            /**< RC: Boolean used to know if the
-                                                                 pan and zoom mode is enabled */
-    M4OSA_UInt16                    PanZoomXa;            /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftXa;    /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftYa;    /**< RC */
-    M4OSA_UInt16                    PanZoomXb;            /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftXb;    /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftYb;    /**< RC */
-    M4xVSS_MediaRendering            MediaRendering;        /**< FB only used with JPEG: to crop,
-                                                                 resize, or render black borders*/
-
-} M4xVSS_ClipSettings;
-
-/**
- ******************************************************************************
- * struct   M4xVSS_EditSettings
- * @brief   This structure gathers all the information needed to define a complete
- *          edition operation
- ******************************************************************************
-*/
-typedef struct
-{
-    /**< Output video size */
-    M4VIDEOEDITING_VideoFrameSize             outputVideoSize;
-    /**< Output video format (MPEG4 / H263) */
-    M4VIDEOEDITING_VideoFormat                outputVideoFormat;
-    /**< Output audio format (AAC, AMRNB ...) */
-    M4VIDEOEDITING_AudioFormat                outputAudioFormat;
-    /**< Output audio sampling freq (8000Hz,...) */
-    M4VIDEOEDITING_AudioSamplingFrequency     outputAudioSamplFreq;
-    /**< Maximum output file size in BYTES (if set to 0, no limit */
-    M4OSA_UInt32                              outputFileSize;
-    /**< Is output audio must be Mono ? Valid only for AAC */
-    M4OSA_Bool                                bAudioMono;
-    /**< Output video bitrate*/
-    M4OSA_UInt32                              outputVideoBitrate;
-    /**< Output audio bitrate*/
-    M4OSA_UInt32                              outputAudioBitrate;
-    /**< Background music track settings */
-    M4xVSS_BGMSettings                        *pBGMtrack;
-    /**< Function pointer on text rendering engine, if not used, must be set to NULL !! */
-    M4xVSS_getTextRgbBufferFct                pTextRenderingFct;
-    /** output video profile and level*/
-    M4OSA_Int32   outputVideoProfile;
-    M4OSA_Int32   outputVideoLevel;
-
-} M4xVSS_EditSettings;
-
-#endif /* __M4VSS3GPP_EXTENDED_API_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h
deleted file mode 100755
index 2669feb..0000000
--- a/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4VSS3GPP_INTERNALCONFIG_H__
-#define __M4VSS3GPP_INTERNALCONFIG_H__
-
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_InternalConfig.h
- * @brief    This file contains some magical and configuration parameters.
- ******************************************************************************
-*/
-
-/***********************/
-/* VideoEdition config */
-/***********************/
-
-#define M4VSS3GPP_MINIMAL_TRANSITION_DURATION            100    /**< 100 milliseconds */
-#define M4VSS3GPP_NB_AU_PREFETCH                        4        /**< prefect 4 AUs */
-#define M4VSS3GPP_NO_STSS_JUMP_POINT                    40000 /**< If 3gp file does not contain
-                                                                   an STSS table (no rap frames),
-                                                                   jump backward 40 s maximum */
-
-/*****************/
-/* Writer config */
-/*****************/
-
-#define M4VSS3GPP_WRITER_AUDIO_STREAM_ID                1
-#define M4VSS3GPP_WRITER_VIDEO_STREAM_ID                2
-
-/**< Max AU size will be 0.8 times the YUV4:2:0 frame size */
-#define M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO            0.9F
-/**< Max chunk size will be 1.2 times the max AU size */
-#define M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO    1.2F
-
-/** READ CAREFULLY IN CASE OF REPORTED RUNNING TROUBLES
-The max AU size is used to pre-set max size of AU that can be written in the 3GP writer
-For audio standard with variable AU size, there could be some encoding settings leading to AU size
-exceeding this limit.
-For AAC streams for instance the average AU size is given by:
-av AU size = (av bitrate * 1024)/(sampling freq)
-If VSS returns the message:
->> ERROR: audio AU size (XXXX) to copy larger than allocated one (YYYY) => abort
->> PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY
-Error is most likely to happen when mixing with audio full replacement
- */
-/**< AAC max AU size - READ EXPLANATION ABOVE */
-#define M4VSS3GPP_AUDIO_MAX_AU_SIZE                        2048
-/**< set to x4 max AU size per chunk */
-#define M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE                    8192
-
-
-/***********************/
-/* H263 / MPEG4 config */
-/***********************/
-
-#define    M4VSS3GPP_EDIT_H263_MODULO_TIME            255
-
-#ifdef BIG_ENDIAN
-/**< 0xb3 01 00 00 Little endian / b00 00 00 01 b3 big endian*/
-#define    M4VSS3GPP_EDIT_GOV_HEADER            0x000001b3
-#else
-/**< 0xb3 01 00 00 Little endian / b00 00 00 01 b3 big endian*/
-#define    M4VSS3GPP_EDIT_GOV_HEADER            0xb3010000
-#endif
-
-/**************/
-/* AMR config */
-/**************/
-
-#define M4VSS3GPP_WRITTEN_AMR_TRACK_TIME_SCALE            8000
-#define M4VSS3GPP_AMR_DECODED_PCM_SAMPLE_NUMBER            160        /**< 20ms at 8000hz -->
-                                                                     20x8=160 samples */
-#define M4VSS3GPP_AMR_DEFAULT_BITRATE                   12200   /**< 12.2 kbps */
-
-/**************/
-/* EVRC config */
-/**************/
-
-#define M4VSS3GPP_EVRC_DEFAULT_BITRATE                  9200   /**< 9.2 kbps */
-
-/**************/
-/* MP3 config */
-/**************/
-
-/** Macro to make a jump on the MP3 track on several steps
-    To avoid to block the system with an long MP3 jump, this process
-    is divided on several steps.
- */
-#define M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX 100
-
-/** Macro to define the number of read AU to analyse the bitrate
-    So the process will read the first n AU of the MP3 stream to get
-    the average bitrate. n is defined by this define.
- */
-#define M4VSS3GPP_MP3_AU_NUMBER_MAX 500
-
-/*****************************/
-/* define AMR silence frames */
-/*****************************/
-
-#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
-#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 160
-
-#ifdef M4VSS3GPP_SILENCE_FRAMES
-const M4OSA_UInt8 M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
-{
-    0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33,
-    0xFF, 0xE0, 0x00, 0x00, 0x00
-};
-#else
-extern const M4OSA_UInt8 \
-              M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
-#endif
-
-/*****************************/
-/* define AAC silence frames */
-/*****************************/
-
-#define M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE      4
-
-#ifdef M4VSS3GPP_SILENCE_FRAMES
-const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE] =
-{
-    0x00, 0xC8, 0x20, 0x07
-};
-#else
-extern const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE];
-#endif
-
-#define M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE        6
-
-#ifdef M4VSS3GPP_SILENCE_FRAMES
-const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE] =
-{
-    0x21, 0x10, 0x03, 0x20, 0x54, 0x1C
-};
-#else
-extern const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE];
-#endif
-
-#endif /* __M4VSS3GPP_INTERNALCONFIG_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h
deleted file mode 100755
index e855882..0000000
--- a/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h
+++ /dev/null
@@ -1,651 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_InternalFunctions.h
- * @brief    This file contains all function prototypes not visible to the external world.
- * @note
- ******************************************************************************
-*/
-
-
-#ifndef __M4VSS3GPP_INTERNALFUNCTIONS_H__
-#define __M4VSS3GPP_INTERNALFUNCTIONS_H__
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- *    VSS public API and types */
-#include "M4VSS3GPP_API.h"
-
-/**
- *    VSS private types */
-#include "M4VSS3GPP_InternalTypes.h"
-
-
-#include "M4READER_Common.h" /**< for M4_AccessUnit definition */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* All errors are fatal in the VSS */
-#define M4ERR_CHECK_RETURN(err) if(M4NO_ERROR!=err) return err;
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
- * @brief    One step of video processing
- * @param   pC    (IN/OUT) Internal edit context
-  ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intEditStepVideo(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
- * @brief    One step of audio processing
- * @param   pC    (IN/OUT) Internal edit context
-  ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intEditStepAudio(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
- * @brief    One step of audio processing for the MP3 clip
- * @param   pC    (IN/OUT) Internal edit context
-  ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intEditStepMP3(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intOpenClip()
- * @brief    Open next clip
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intOpenClip(M4VSS3GPP_InternalEditContext *pC, M4VSS3GPP_ClipContext **hClip,
-                                 M4VSS3GPP_ClipSettings *pClipSettings);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
- * @brief    Destroy the video encoder
- * @note
-  ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
- * @brief    Creates the video encoder
- * @note
-  ******************************************************************************
-*/
-M4OSA_ERR  M4VSS3GPP_intCreateVideoEncoder(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
- * @brief    Do what to do when the end of a clip video track is reached
- * @note    If there is audio on the current clip, process it, else switch to the next clip
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
- * @brief    Do what to do when the end of a clip audio track is reached
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
- * @brief    Check if the clip is compatible with VSS editing
- * @note
- * @param   pClipCtxt            (IN) internal clip context
- * @param    pClipProperties     (OUT) Pointer to a valid ClipProperties structure.
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(M4VIDEOEDITING_ClipProperties \
-                                                            *pClipProperties);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipOpen()
- * @brief    Open a clip. Creates a clip context.
- * @note
- * @param   hClipCtxt            (OUT) Return the internal clip context
- * @param   pClipSettings        (IN) Edit settings of this clip. The module will keep a
- *                                        reference to this pointer
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @param    bSkipAudioTrack        (IN) If true, do not open the audio
- * @param    bFastOpenMode        (IN) If true, use the fast mode of the 3gpp reader
- *                                            (only the first AU is read)
- * @return    M4NO_ERROR:                No error
- * @return    M4ERR_ALLOC:            There is no more available memory
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intClipInit (
-    M4VSS3GPP_ClipContext **hClipCtxt,
-    M4OSA_FileReadPointer *pFileReadPtrFct
-);
-
-M4OSA_ERR M4VSS3GPP_intClipOpen (
-    M4VSS3GPP_ClipContext *pClipCtxt,
-    M4VSS3GPP_ClipSettings *pClipSettings,
-    M4OSA_Bool bSkipAudioTrack,
-    M4OSA_Bool bFastOpenMode,
-    M4OSA_Bool bAvoidOpeningVideoDec
-);
-
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
- * @brief    Delete the audio track. Clip will be like if it had no audio track
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- ******************************************************************************
-*/
-M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack(M4VSS3GPP_ClipContext *pClipCtxt);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCurrentTime()
- * @brief    Jump to the previous RAP and decode up to the current video time
- * @param   pClipCtxt    (IN) Internal clip context
- * @param   iCts        (IN) Target CTS
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts(M4VSS3GPP_ClipContext* pClipCtxt, M4OSA_Int32 iCts);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
- * @brief    Read one AU frame in the clip
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(M4VSS3GPP_ClipContext *pClipCtxt);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
- * @brief    Decode the current AUDIO frame.
- * @note
- * @param   pClipCtxt        (IN) internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(M4VSS3GPP_ClipContext *pClipCtxt);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
- * @brief    Jump in the audio track of the clip.
- * @note
- * @param   pClipCtxt            (IN) internal clip context
- * @param   pJumpCts            (IN/OUT) in:target CTS, out: reached CTS
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt(M4VSS3GPP_ClipContext *pClipCtxt, M4OSA_Int32 *pJumpCts);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipClose()
- * @brief    Close a clip. Destroy the context.
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intClipClose(M4VSS3GPP_ClipContext *pClipCtxt);
-
-M4OSA_ERR M4VSS3GPP_intClipCleanUp(M4VSS3GPP_ClipContext *pClipCtxt);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
- * @brief    One step of jumping processing for the MP3 clip.
- * @note    On one step, the jump of several AU is done
- * @param   pC    (IN/OUT) Internal edit context
-  ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intEditJumpMP3(M4VSS3GPP_InternalEditContext *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerWriter()
- * @brief    This function will register a specific file format writer.
- * @note    According to the Mediatype, this function will store in the internal context
- *             the writer context.
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext,pWtrGlobalInterface or pWtrDataInterface is
- *                                 M4OSA_NULL (debug only), or invalid MediaType
- ******************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_registerWriter(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                     M4WRITER_OutputFileType MediaType,
-                                     M4WRITER_GlobalInterface* pWtrGlobalInterface,
-                                     M4WRITER_DataInterface* pWtrDataInterface);
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerEncoder()
- * @brief    This function will register a specific video encoder.
- * @note    According to the Mediatype, this function will store in the internal context
- *            the encoder context.
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
- *                                 or invalid MediaType
- ******************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_registerVideoEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                           M4ENCODER_Format MediaType,
-                                           M4ENCODER_GlobalInterface *pEncGlobalInterface);
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerAudioEncoder()
- * @brief    This function will register a specific audio encoder.
- * @note    According to the Mediatype, this function will store in the internal context
- *             the encoder context.
- * @param    pContext:                (IN) Execution context.
- * @param    mediaType:                (IN) The media type.
- * @param    pEncGlobalInterface:    (OUT) the encoder interface functions.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
- ******************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_registerAudioEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                             M4ENCODER_AudioFormat MediaType,
-                                             M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerReader()
- * @brief    Register reader.
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_registerReader(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                     M4READER_MediaType mediaType,
-                                     M4READER_GlobalInterface *pRdrGlobalInterface,
-                                     M4READER_DataInterface *pRdrDataInterface);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerVideoDecoder()
- * @brief    Register video decoder
- * @param    pContext                (IN/OUT) VSS context.
- * @param    decoderType            (IN) Decoder type
- * @param    pDecoderInterface    (IN) Decoder interface.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only), or the decoder type
- *                                    is invalid
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_registerVideoDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                            M4DECODER_VideoType decoderType,
-                                            M4DECODER_VideoInterface *pDecoderInterface);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerAudioDecoder()
- * @brief    Register audio decoder
- * @note    This function is used internaly by the VSS to register audio decoders,
- * @param    context                (IN/OUT) VSS context.
- * @param    decoderType            (IN) Audio decoder type
- * @param    pDecoderInterface    (IN) Audio decoder interface.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null, or the decoder type is invalid
- *                                 (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_registerAudioDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                           M4AD_Type decoderType,
-                                           M4AD_Interface *pDecoderInterface);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters()
- * @brief    Unregister writer
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters(M4VSS3GPP_MediaAndCodecCtxt *pC);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders()
- * @brief    Unregister the encoders
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders(M4VSS3GPP_MediaAndCodecCtxt *pC);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders()
- * @brief    Unregister reader
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders(M4VSS3GPP_MediaAndCodecCtxt *pC);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders()
- * @brief    Unregister the decoders
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders(M4VSS3GPP_MediaAndCodecCtxt *pC);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentWriter()
- * @brief    Set current writer
- * @param    pContext            (IN/OUT) VSS context.
- * @param    mediaType            (IN) Media type.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_setCurrentWriter(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                        M4VIDEOEDITING_FileType mediaType);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder()
- * @brief    Set a video encoder
- * @param    pContext            (IN/OUT) VSS context.
- * @param    MediaType           (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                                M4SYS_StreamType mediaType);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder()
- * @brief    Set an audio encoder
- * @param    context            (IN/OUT) VSS context.
- * @param    MediaType        (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                             M4SYS_StreamType mediaType);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentReader()
- * @brief    Set current reader
- * @param    pContext            (IN/OUT) VSS context.
- * @param    mediaType            (IN) Media type.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_setCurrentReader(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                         M4VIDEOEDITING_FileType mediaType);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder()
- * @brief    Set a video decoder
- * @param    pContext            (IN/OUT) VSS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                             M4_StreamType mediaType);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder()
- * @brief    Set an audio decoder
- * @param    context            (IN/OUT) VSS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                             M4_StreamType mediaType);
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_clearInterfaceTables()
- * @brief    Clear encoders, decoders, reader and writers interfaces tables
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    The context is null
- ************************************************************************
-*/
-M4OSA_ERR   M4VSS3GPP_clearInterfaceTables(M4VSS3GPP_MediaAndCodecCtxt *pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_SubscribeMediaAndCodec()
- * @brief    This function registers the reader, decoders, writers and encoders
- *          in the VSS.
- * @note
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext is NULL
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext);
-
-/**
- ******************************************************************************
- * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
- * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
- * @note
- * @param   pAudioFrame   (IN) AMRNB frame
- * @return  M4NO_ERROR: No error
- ******************************************************************************
-*/
-M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB(M4OSA_MemAddr8 pAudioFrame);
-
-/**
- ******************************************************************************
- * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
- * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
- * @note
- *     0 1 2 3
- *    +-+-+-+-+
- *    |fr type|              RFC 3558
- *    +-+-+-+-+
- *
- * Frame Type: 4 bits
- *    The frame type indicates the type of the corresponding codec data
- *    frame in the RTP packet.
- *
- * For EVRC and SMV codecs, the frame type values and size of the
- * associated codec data frame are described in the table below:
- *
- * Value   Rate      Total codec data frame size (in octets)
- * ---------------------------------------------------------
- *   0     Blank      0    (0 bit)
- *   1     1/8        2    (16 bits)
- *   2     1/4        5    (40 bits; not valid for EVRC)
- *   3     1/2       10    (80 bits)
- *   4     1         22    (171 bits; 5 padded at end with zeros)
- *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
- *
- * @param   pCpAudioFrame   (IN) EVRC frame
- * @return  M4NO_ERROR: No error
- ******************************************************************************
-*/
-M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC(M4OSA_MemAddr8 pAudioFrame);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
- * @brief    Get video and audio properties from the clip streams
- * @note    This function must return fatal errors only (errors that should not happen in the
- *             final integrated product).
- * @param   pClipCtxt            (IN) internal clip context
- * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure.
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intBuildAnalysis(M4VSS3GPP_ClipContext *pClipCtxt,
-                                     M4VIDEOEDITING_ClipProperties *pClipProperties);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
- * @brief    Reset the audio encoder (Create it if needed)
- * @note
-  ******************************************************************************
-*/
-M4OSA_ERR  M4VSS3GPP_intCreateAudioEncoder(M4VSS3GPP_EncodeWriteContext *pC_ewc,
-                                             M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
-                                             M4OSA_UInt32 uiAudioBitrate);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
- * @brief    Creates and prepare the output MP3 file
- * @note    Creates the writer, Creates the output file, Adds the streams, Readies the
- *            writing process
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile(M4VSS3GPP_EncodeWriteContext *pC_ewc,
-                                            M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
-                                            M4OSA_FileWriterPointer *pOsaFileWritPtr,
-                                            M4OSA_Void* pOutputFile,
-                                            M4OSA_FileReadPointer *pOsaFileReadPtr,
-                                            M4OSA_Void* pTempFile,
-                                            M4OSA_UInt32 maxOutputFileSize);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
- * @brief    This function allows checking if two clips are compatible with each other for
- *             VSS 3GPP audio mixing feature.
- * @note
- * @param    pC                            (IN) Context of the audio mixer
- * @param    pInputClipProperties        (IN) Clip analysis of the first clip
- * @param    pAddedClipProperties        (IN) Clip analysis of the second clip
- * @return    M4NO_ERROR:            No error
- * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
- * @return  M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
- * @return  M4NO_ERROR
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility(M4VSS3GPP_InternalAudioMixingContext *pC,
-                                                 M4VIDEOEDITING_ClipProperties \
-                                                 *pInputClipProperties,
-                                                 M4VIDEOEDITING_ClipProperties  \
-                                                 *pAddedClipProperties);
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
- * @brief    Delete the audio track. Clip will be like if it had no audio track
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- ******************************************************************************
-*/
-M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack(M4VSS3GPP_ClipContext *pClipCtxt);
-
-/******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intStartAU()
- * @brief    StartAU writer-like interface used for the VSS 3GPP only
- * @note
- * @param    pContext: (IN) It is the VSS 3GPP context in our case
- * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
- * @param    pAU:      (IN/OUT) Access Unit to be prepared.
- * @return    M4NO_ERROR: there is no error
- ******************************************************************************
-*/
-M4OSA_ERR  M4VSS3GPP_intStartAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,
-                                 M4SYS_AccessUnit* pAU);
-
-/******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intProcessAU()
- * @brief    ProcessAU writer-like interface used for the VSS 3GPP only
- * @note
- * @param    pContext: (IN) It is the VSS 3GPP context in our case
- * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
- * @param    pAU:      (IN/OUT) Access Unit to be written
- * @return    M4NO_ERROR: there is no error
- ******************************************************************************
-*/
-M4OSA_ERR  M4VSS3GPP_intProcessAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,
-                                     M4SYS_AccessUnit* pAU);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intVPP()
- * @brief    We implement our own VideoPreProcessing function
- * @note    It is called by the video encoder
- * @param    pContext    (IN) VPP context, which actually is the VSS 3GPP context in our case
- * @param    pPlaneIn    (IN)
- * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
- *                             output YUV420 image
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
-*/
-M4OSA_ERR  M4VSS3GPP_intVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
-                             M4VIFI_ImagePlane* pPlaneOut);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __M4VSS3GPP_INTERNALFUNCTIONS_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h
deleted file mode 100755
index a7900f0..0000000
--- a/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_InternalTypes.h
- * @brief    This file contains all enum and types not visible to the external world.
- * @note
- ******************************************************************************
-*/
-
-
-#ifndef __M4VSS3GPP_INTERNALTYPES_H__
-#define __M4VSS3GPP_INTERNALTYPES_H__
-
-#define M4VSS_VERSION_MAJOR        3
-#define M4VSS_VERSION_MINOR        2
-#define M4VSS_VERSION_REVISION    5
-
-#include "NXPSW_CompilerSwitches.h"
-
-/**
- *    VSS public API and types */
-#include "M4VSS3GPP_API.h"
-
-/**
- *    Internally used modules */
-#include "M4READER_Common.h"        /**< Reader common interface */
-#include "M4WRITER_common.h"        /**< Writer common interface */
-#include "M4DECODER_Common.h"        /**< Decoder common interface */
-#include "M4ENCODER_common.h"        /**< Encoder common interface */
-#include "M4VIFI_FiltersAPI.h"        /**< Image planes definition */
-#include "M4READER_3gpCom.h"        /**< Read 3GPP file     */
-#include "M4AD_Common.h"            /**< Decoder audio   */
-#include "M4ENCODER_AudioCommon.h"  /**< Encode audio    */
-
-
-#include "SSRC.h"                    /**< SSRC             */
-#include "From2iToMono_16.h"        /**< Stereo to Mono     */
-#include "MonoTo2I_16.h"            /**< Mono to Stereo     */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define WINDOW_SIZE 10
-/**
- ******************************************************************************
- * enum            M4VSS3GPP_EditState
- * @brief        Main state machine of the VSS 3GPP edit operation.
- ******************************************************************************
-*/
-
-typedef enum
-{
-    M4VSS3GPP_kEditState_CREATED    = 0,    /**< M4VSS3GPP_editInit has been called */
-    M4VSS3GPP_kEditState_VIDEO        = 1,    /**< Processing video track */
-    M4VSS3GPP_kEditState_AUDIO        = 2,    /**< Processing audio track */
-    M4VSS3GPP_kEditState_MP3        = 3,    /**< Processing MP3 audio track */
-    M4VSS3GPP_kEditState_MP3_JUMP   = 4,        /**< Processing a jump in a MP3 audio track */
-    M4VSS3GPP_kEditState_FINISHED    = 5,    /**< Processing done, VSS 3GPP can be closed */
-    M4VSS3GPP_kEditState_CLOSED        = 6        /**< Output file has been closed,
-                                                     VSS 3GPP can be destroyed */
-}
-M4VSS3GPP_EditState;
-
-typedef enum
-{
-    /**< Doing Read/Write operation. This operation will have no processing
-     * on input frames. Only time stamp manipulations in output file. */
-    M4VSS3GPP_kEditVideoState_READ_WRITE    = 10,
-    /**< Decode encode to create an I frame. This is done for a single frame
-     * to create a new reference frame. */
-    M4VSS3GPP_kEditVideoState_BEGIN_CUT     = 11,
-    /**< Doing Read->Decode->Filter->Encode->Write operation on the input file
-     * to create the output file. */
-    M4VSS3GPP_kEditVideoState_DECODE_ENCODE = 12,
-    /**< Applied when Transition is active and blending of two videos is
-     * required. */
-    M4VSS3GPP_kEditVideoState_TRANSITION    = 13,
-    /**< Special Read/Write mode used after BEGIN_CUT state. The frame
-     * is already coded as I frame in BEGIN_CUT state; so skip it. */
-    M4VSS3GPP_kEditVideoState_AFTER_CUT     = 14
-}
-M4VSS3GPP_EditVideoState;
-
-typedef enum
-{
-    M4VSS3GPP_kEditAudioState_READ_WRITE    = 20,    /**< Doing Read/Write operation
-                                                        (no decoding/encoding) */
-    M4VSS3GPP_kEditAudioState_DECODE_ENCODE = 21,    /**< Doing Read-Decode/Filter/
-                                                            Encode-Write operation */
-    M4VSS3GPP_kEditAudioState_TRANSITION    = 22    /**< Transition; blending of two audio */
-}
-M4VSS3GPP_EditAudioState;
-
-
-/**
- ******************************************************************************
- * enum            M4VSS3GPP_ClipStatus
- * @brief        Status of the clip.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VSS3GPP_kClipStatus_READ            = 0,    /**< The clip is currently ready for reading */
-    M4VSS3GPP_kClipStatus_DECODE        = 1,    /**< The clip is currently ready for decoding */
-    M4VSS3GPP_kClipStatus_DECODE_UP_TO    = 2        /**< The clip is currently in splitted
-                                                         decodeUpTo() processing */
-}
-M4VSS3GPP_ClipStatus;
-
-
-/**
- ******************************************************************************
- * enum            M4VSS3GPP_ClipCurrentEffect
- * @brief        Current effect applied to the clip.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VSS3GPP_kClipCurrentEffect_NONE    = 0,    /**< None */
-    M4VSS3GPP_kClipCurrentEffect_BEGIN    = 1,    /**< Begin effect currently applied */
-    M4VSS3GPP_kClipCurrentEffect_END    = 2        /**< End effect currently applied */
-}
-M4VSS3GPP_ClipCurrentEffect;
-
-
-/**
- ******************************************************************************
- * enum            M4VSS3GPP_AudioMixingState
- * @brief        Main state machine of the VSS audio mixing operation.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VSS3GPP_kAudioMixingState_VIDEO = 0,            /**< Video is being processed */
-    M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT,  /**< Audio is being processed */
-    M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT, /**< Audio is being processed */
-    M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT,  /**< Audio is being processed */
-    M4VSS3GPP_kAudioMixingState_FINISHED              /**< Processing finished, user must now
-                                                            call M4VSS3GPP_audioMixingCleanUp*/
-}
-M4VSS3GPP_AudioMixingState;
-
-
-/**
- ******************************************************************************
- * enum            M4VSS3GPP_ExtractPictureState
- * @brief        Main state machine of the VSS picture extraction.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4VSS3GPP_kExtractPictureState_OPENED   = 0,  /**< Video clip is opened and ready to be read
-                                                     until the RAP before the picture to extract */
-    M4VSS3GPP_kExtractPictureState_PROCESS    = 1,  /**< Video is decoded from the previous RAP
-                                                        to the picture to extract */
-    M4VSS3GPP_kExtractPictureState_EXTRACTED= 2   /**< Video AU has been  decoded, user must now
-                                                        call M4VSS3GPP_extractPictureCleanUp */
-}
-M4VSS3GPP_ExtractPictureState;
-
-
-/**
- ******************************************************************************
- * @brief        Codecs registration same as in VPS and VES, so less mapping
- *              is required toward VSS api types
- ******************************************************************************
-*/
-typedef struct
-{
-    M4WRITER_GlobalInterface*    pGlobalFcts;    /**< open, close, setoption,etc... functions */
-    M4WRITER_DataInterface*        pDataFcts;        /**< data manipulation functions */
-} M4VSS3GPP_WriterInterface;
-/**
- ******************************************************************************
- * struct AAC_DEC_STREAM_PROPS
- * @brief AAC Stream properties
- * @Note aNoChan and aSampFreq are used for parsing even the user parameters
- *        are different.  User parameters will be input for the output behaviour
- *        of the decoder whereas for parsing bitstream properties are used.
- ******************************************************************************
- */
-typedef struct {
-  M4OSA_Int32 aAudioObjectType;     /**< Audio object type of the stream - in fact
-                                         the type found in the Access Unit parsed */
-  M4OSA_Int32 aNumChan;             /**< number of channels (=1(mono) or =2(stereo))
-                                         as indicated by input bitstream*/
-  M4OSA_Int32 aSampFreq;            /**< sampling frequency in Hz */
-  M4OSA_Int32 aExtensionSampFreq;   /**< extended sampling frequency in Hz, = 0 is
-                                         no extended frequency */
-  M4OSA_Int32 aSBRPresent;          /**< presence=1/absence=0 of SBR */
-  M4OSA_Int32 aPSPresent;           /**< presence=1/absence=0 of PS */
-  M4OSA_Int32 aMaxPCMSamplesPerCh;  /**< max number of PCM samples per channel */
-} AAC_DEC_STREAM_PROPS;
-
-
-/**
- ******************************************************************************
- * enum            M4VSS3GPP_MediaAndCodecCtxt
- * @brief        Filesystem and codec registration function pointers
- ******************************************************************************
-*/
-typedef struct {
-    /**
-      * Media and Codec registration */
-    /**< Table of M4VES_WriterInterface structures for avalaible Writers list */
-    M4VSS3GPP_WriterInterface    WriterInterface[M4WRITER_kType_NB];
-    /**< open, close, setoption,etc... functions of the used writer*/
-    M4WRITER_GlobalInterface*    pWriterGlobalFcts;
-    /**< data manipulation functions of the used writer */
-    M4WRITER_DataInterface*        pWriterDataFcts;
-
-    /**< Table of M4ENCODER_GlobalInterface structures for avalaible encoders list */
-    M4ENCODER_GlobalInterface*    pVideoEncoderInterface[M4ENCODER_kVideo_NB];
-    /**< Functions of the used encoder */
-    M4ENCODER_GlobalInterface*    pVideoEncoderGlobalFcts;
-
-    M4OSA_Void*                    pVideoEncoderExternalAPITable[M4ENCODER_kVideo_NB];
-    M4OSA_Void*                    pCurrentVideoEncoderExternalAPI;
-    M4OSA_Void*                    pVideoEncoderUserDataTable[M4ENCODER_kVideo_NB];
-    M4OSA_Void*                    pCurrentVideoEncoderUserData;
-
-    /**< Table of M4ENCODER_AudioGlobalInterface structures for avalaible encoders list */
-    M4ENCODER_AudioGlobalInterface*    pAudioEncoderInterface[M4ENCODER_kAudio_NB];
-    /**< Table of internal/external flags for avalaible encoders list */
-    M4OSA_Bool                      pAudioEncoderFlag[M4ENCODER_kAudio_NB];
-    /**< Functions of the used encoder */
-    M4ENCODER_AudioGlobalInterface*    pAudioEncoderGlobalFcts;
-
-    M4READER_GlobalInterface*   m_pReaderGlobalItTable[M4READER_kMediaType_NB];
-    M4READER_DataInterface*     m_pReaderDataItTable[M4READER_kMediaType_NB];
-    M4READER_GlobalInterface*   m_pReader;
-    M4READER_DataInterface*     m_pReaderDataIt;
-    M4OSA_UInt8                 m_uiNbRegisteredReaders;
-
-    M4DECODER_VideoInterface*   m_pVideoDecoder;
-    M4DECODER_VideoInterface*   m_pVideoDecoderItTable[M4DECODER_kVideoType_NB];
-    M4OSA_UInt8                 m_uiNbRegisteredVideoDec;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-    M4OSA_Void*                    m_pCurrentVideoDecoderUserData;
-    M4OSA_Void*                    m_pVideoDecoderUserDataTable[M4DECODER_kVideoType_NB];
-#endif
-
-    M4AD_Interface*             m_pAudioDecoder;
-    M4AD_Interface*                m_pAudioDecoderItTable[M4AD_kType_NB];
-    /**< store indices of external decoders */
-    M4OSA_Bool                    m_pAudioDecoderFlagTable[M4AD_kType_NB];
-
-    M4OSA_Void*                pAudioEncoderUserDataTable[M4ENCODER_kAudio_NB];
-    M4OSA_Void*                pCurrentAudioEncoderUserData;
-
-    M4OSA_Void*                pAudioDecoderUserDataTable[M4AD_kType_NB];
-    M4OSA_Void*                pCurrentAudioDecoderUserData;
-
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-    /* boolean to tell whether registered external OMX codecs should be freed during cleanup
-     or new codec registration*/
-    M4OSA_Bool    bAllowFreeingOMXCodecInterface;
-#endif
-
-
-} M4VSS3GPP_MediaAndCodecCtxt;
-
-
-/**
- ******************************************************************************
- * structure    M4VSS3GPP_ClipContext
- * @brief        This structure contains informations related to one 3GPP clip (private)
- * @note        This structure is used to store the context related to one clip
- ******************************************************************************
-*/
-typedef struct
-{
-    M4VSS3GPP_ClipSettings*        pSettings;            /**< Pointer to the clip settings
-                                                            (not possessed) */
-
-    M4VSS3GPP_ClipStatus        Vstatus;            /**< Video status of the clip reading */
-    M4VSS3GPP_ClipStatus        Astatus;            /**< Audio status of the clip reading */
-
-    M4OSA_Int32                    iVoffset;            /**< [Milliseconds] Offset between the
-                                                            clip and the output video stream
-                                                            (begin cut taken into account) */
-    M4OSA_Int32                    iAoffset;           /**< [Timescale] Offset between the clip
-                                                            and the output audio stream (begin
-                                                            cut taken into account) */
-
-    /**
-     * 3GPP reader Stuff */
-    M4OSA_FileReadPointer*        pFileReadPtrFct;
-    M4OSA_Context                pReaderContext;         /**< Context of the 3GPP reader module */
-    M4_VideoStreamHandler*        pVideoStream;        /**< Description of the read video stream */
-    M4_AudioStreamHandler*        pAudioStream;        /**< Description of the read audio stream */
-    M4_AccessUnit                VideoAU;            /**< Read video access unit (we do not use a
-                                                            pointer to allocate later, because
-                                                            most of the time we will need it) */
-    M4_AccessUnit                AudioAU;            /**< Read audio access unit (we do not use a
-                                                         pointer to allocate later, because most
-                                                         of the time we will need it) */
-    M4OSA_Bool                    bVideoAuAvailable;    /**< Tell if a video AU is available
-                                                            (previously read) */
-    /**< Boolean only used to fix the BZZ bug... */
-    M4OSA_Bool                    bFirstAuWritten;
-
-    /**
-     * Video decoder stuff */
-    M4OSA_Context                pViDecCtxt;            /**< Video decoder context */
-    M4OSA_Int32                 iVideoDecCts;       /**< [Milliseconds] For video decodeUpTo(),
-                                                             the actual reached cts */
-    M4OSA_Int32                    iVideoRenderCts;    /**< [Milliseconds] For video render(),
-                                                             the actual reached cts */
-    M4OSA_Bool                    isRenderDup;        /**< To handle duplicate frame rendering in
-                                                             case of external decoding */
-    M4VIFI_ImagePlane*            lastDecodedPlane;    /**< Last decoded plane */
-
-    /**
-     * MPEG4 time info stuff at clip level */
-    M4OSA_Bool             bMpeg4GovState;            /**< Namely, update or initialization */
-    M4OSA_UInt32           uiMpeg4PrevGovValueGet;    /**< Previous Gov value read (in second) */
-    M4OSA_UInt32           uiMpeg4PrevGovValueSet;    /**< Previous Gov value write (in second) */
-
-    /**
-     * Time-line stuff */
-     /**< [Milliseconds] CTS at which the video clip actually starts */
-    M4OSA_Int32                    iActualVideoBeginCut;
-    /**< [Milliseconds] CTS at which the audio clip actually starts */
-    M4OSA_Int32                    iActualAudioBeginCut;
-    /**< [Milliseconds] Time at which the clip must end */
-    M4OSA_Int32                    iEndTime;
-
-    /**
-     * Audio decoder stuff */
-    M4OSA_Context                pAudioDecCtxt;        /**< Context of the AMR decoder */
-    M4AD_Buffer                 AudioDecBufferIn;    /**< Input structure for the audio decoder */
-    M4AD_Buffer                    AudioDecBufferOut;    /**< Buffer for the decoded PCM data */
-    AAC_DEC_STREAM_PROPS        AacProperties;      /**< Structure for new api to get AAC
-                                                            properties */
-
-    /**
-     * Audio AU to Frame split stuff */
-    M4OSA_Bool                bAudioFrameAvailable;  /**< True if an audio frame is available */
-    M4OSA_MemAddr8            pAudioFramePtr;        /**< Pointer to the Audio frame */
-    M4OSA_UInt32              uiAudioFrameSize;        /**< Size of the audio frame available */
-    M4OSA_Int32               iAudioFrameCts;       /**< [Timescale] CTS of the audio frame
-                                                            available */
-
-    /**
-     * Silence frame stuff */
-     /**< Size to reserve to store a pcm full of zeros compatible with master clip stream type */
-    M4OSA_UInt32                uiSilencePcmSize;
-    /**< Pointer to silence frame data compatible with master clip stream type */
-    M4OSA_UInt8*                pSilenceFrameData;
-    /**< Size of silence frame data compatible with master clip stream type */
-    M4OSA_UInt32                uiSilenceFrameSize;
-    /**< [Timescale] Duration of silence frame data compatible with master clip stream type */
-    M4OSA_Int32                 iSilenceFrameDuration;
-    M4OSA_Double                scale_audio;            /**< frequency / 1000.0 */
-
-    /**
-     * Interfaces of the used modules */
-     /**< Filesystem and shell reader, decoder functions */
-    M4VSS3GPP_MediaAndCodecCtxt ShellAPI;
-    M4VIFI_ImagePlane           *pPlaneYuv;  /* YUV420 image plane, converted from ARGB888 */
-    M4VIFI_ImagePlane*          m_pPreResizeFrame;  /* The decoded image before resize
-                                                   (allocated only if resize needed)*/
-    M4VIFI_ImagePlane           *pPlaneYuvWithEffect; /* YUV420 image plane, with color effect */
-    M4OSA_Bool                  bGetYuvDataFromDecoder;  /* Boolean used to get YUV data from dummy video decoder only for first time */
-} M4VSS3GPP_ClipContext;
-
-
-/**
- ******************************************************************************
- * enum            anonymous enum
- * @brief        enum to keep track of the encoder state
- ******************************************************************************
-*/
-enum
-{
-    M4VSS3GPP_kNoEncoder,
-    M4VSS3GPP_kEncoderClosed,
-    M4VSS3GPP_kEncoderStopped,
-    M4VSS3GPP_kEncoderRunning
-};
-
-/**
- ******************************************************************************
- * structure    M4VSS3GPP_AudioVideoContext
- * @brief        This structure defines the audio video context (private)
- * @note        This structure is used for all audio/video, encoding/writing operations.
- ******************************************************************************
-*/
-typedef struct
-{
-    /**
-     * Timing Stuff */
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    /**< [Milliseconds] Duration of the output file, used for progress computation */
-    M4OSA_Double                dInputVidCts;
-    /**< [Milliseconds] Current CTS of the video output stream */
-    M4OSA_Double                dOutputVidCts;
-/**< [Milliseconds] Current CTS of the audio output stream */
-    M4OSA_Double                dATo;
-     /**< [Milliseconds] Duration of the output file, used for progress computation */
-    M4OSA_Int32                    iOutputDuration;
-
-    /**
-     * Output Video Stream Stuff */
-    M4SYS_StreamType            VideoStreamType;        /**< Output video codec */
-    M4OSA_Int32                 outputVideoProfile;  /**< Output video profile */
-    M4OSA_Int32                 outputVideoLevel;   /**< Output video level */
-    M4OSA_UInt32                uiVideoBitrate;     /**< Average video bitrate of the output file,
-                                                         computed from input bitrates, durations,
-                                                          transitions and cuts */
-    M4OSA_UInt32                uiVideoWidth;            /**< Output image width */
-    M4OSA_UInt32                uiVideoHeight;            /**< Output image height */
-    M4OSA_UInt32                uiVideoTimeScale;        /**< Time scale to use for the encoding
-                                                            of the transition (if MPEG-4) */
-    M4OSA_Bool                    bVideoDataPartitioning;    /**< Data partitioning to use for the
-                                                                 encoding of the transition
-                                                                 (if MPEG-4) */
-    M4OSA_MemAddr8                pVideoOutputDsi;        /**< Decoder Specific Info of the output
-                                                                 MPEG-4 track */
-    M4OSA_UInt16                uiVideoOutputDsiSize;    /**< Size of the Decoder Specific Info
-                                                                of the output MPEG-4 track */
-    /**
-     * Output Audio Stream Stuff */
-    M4SYS_StreamType            AudioStreamType;        /**< Type of the output audio stream */
-    M4OSA_UInt32                uiNbChannels;           /**< Number of channels in the output
-                                                            stream (1=mono, 2=stereo) */
-    M4OSA_UInt32                uiAudioBitrate;         /**< Audio average bitrate (in bps) */
-    M4OSA_UInt32                uiSamplingFrequency;    /**< Sampling audio frequency (8000 for
-                                                                amr, 16000 or more for aac) */
-    M4OSA_MemAddr8                pAudioOutputDsi;        /**< Decoder Specific Info of the
-                                                                output audio track */
-    M4OSA_UInt16                uiAudioOutputDsiSize;    /**< Size of the Decoder Specific Info
-                                                                of the output audio track */
-
-    /**
-     * Audio Encoder stuff */
-    M4OSA_Context                   pAudioEncCtxt;        /**< Context of the audio encoder */
-    M4ENCODER_AudioDecSpecificInfo  pAudioEncDSI;       /**< Decoder specific info built by the
-                                                                encoder */
-    M4ENCODER_AudioParams           AudioEncParams;     /**< Config of the audio encoder */
-
-    /**
-     * Silence frame stuff */
-    M4OSA_UInt32                uiSilencePcmSize;       /**< Size to reserve to store a pcm full
-                                                             of zeros compatible with master clip
-                                                             stream type */
-    M4OSA_UInt8*                pSilenceFrameData;      /**< Pointer to silence frame data
-                                                                compatible with master clip
-                                                                stream type */
-    M4OSA_UInt32                uiSilenceFrameSize;     /**< Size of silence frame data compatible
-                                                             with master clip stream type */
-    M4OSA_Int32                 iSilenceFrameDuration;  /**< [Timescale] Duration of silence frame
-                                                                 data compatible with master clip
-                                                                 stream type */
-    M4OSA_Double                scale_audio;            /**< frequency / 1000.0 */
-
-    /**
-     * Video Encoder stuff */
-    M4ENCODER_Context            pEncContext;            /**< Context of the encoder */
-    M4WRITER_DataInterface        OurWriterDataInterface;    /**< Our own implementation of the
-                                                                    writer interface, to give to
-                                                                    the encoder shell */
-    M4OSA_MemAddr32                pDummyAuBuffer;            /**< Buffer given to the encoder for
-                                                                   it to write AUs we don't want
-                                                                    in the output */
-    M4OSA_Int32                    iMpeg4GovOffset;        /**< Clip GOV offset in ms between
-                                                                 video and system time */
-    M4OSA_ERR                    VppError;                /**< Error for VPP are masked by Video
-                                                               Encoder, so we must remember it */
-    M4OSA_UInt32                encoderState;
-
-    /**
-     * Writer stuff */
-    M4WRITER_Context            p3gpWriterContext;        /**< Context of the 3GPP writer module */
-    M4SYS_StreamDescription        WriterVideoStream;        /**< Description of the written
-                                                                    video stream */
-    M4SYS_StreamDescription        WriterAudioStream;        /**< Description of the written
-                                                                    audio stream */
-    M4WRITER_StreamVideoInfos    WriterVideoStreamInfo;    /**< Video properties of the written
-                                                                     video stream */
-    M4WRITER_StreamAudioInfos    WriterAudioStreamInfo;    /**< Audio properties of the written
-                                                                    audio stream */
-    M4SYS_AccessUnit            WriterVideoAU;            /**< Written video access unit */
-    M4SYS_AccessUnit            WriterAudioAU;            /**< Written audio access unit */
-    M4OSA_UInt32                uiVideoMaxAuSize;        /**< Max AU size set to the writer
-                                                                for the video */
-    M4OSA_UInt32                uiAudioMaxAuSize;        /**< Max AU size set to the writer
-                                                                for the audio */
-    M4OSA_UInt32                uiOutputAverageVideoBitrate; /**< Average video bitrate of the
-                                                                    output file, computed from
-                                                                    input bitrates, durations,
-                                                                    transitions and cuts */
-
-} M4VSS3GPP_EncodeWriteContext;
-
-
-/**
- ******************************************************************************
- * structure    M4VSS3GPP_InternalEditContext
- * @brief        This structure defines the edit VSS context (private)
- * @note        This structure is used for all VSS edit operations to store the context
- ******************************************************************************
-*/
-typedef struct
-{
-    /**
-     * VSS 3GPP main variables */
-    M4VSS3GPP_EditState         State;                    /**< VSS internal state */
-    M4VSS3GPP_EditVideoState    Vstate;
-    M4VSS3GPP_EditAudioState    Astate;
-
-    /**
-     * User Settings (copied, thus owned by VSS3GPP) */
-    M4OSA_UInt8                        uiClipNumber;        /**< Number of element of the clip
-                                                                 list pClipList. */
-    M4VSS3GPP_ClipSettings           *pClipList;            /**< List of the input clips settings
-                                                            Array of uiClipNumber clip settings */
-    M4VSS3GPP_TransitionSettings   *pTransitionList;    /**< List of the transition settings.
-                                                    Array of uiClipNumber-1 transition settings */
-    M4VSS3GPP_EffectSettings       *pEffectsList;        /**< List of the effects settings.
-                                                             Array of nbEffects RC */
-    M4OSA_UInt8                       *pActiveEffectsList;    /**< List of the active effects
-                                                                settings. Array of nbEffects RC */
-    M4OSA_UInt8                        nbEffects;            /**< Numbers of effects RC */
-    M4OSA_UInt8                        nbActiveEffects;    /**< Numbers of active effects RC */
-
-    /**
-     * Input Stuff */
-    M4OSA_UInt8                        uiCurrentClip;        /**< Index of the current clip 1 in
-                                                                    the input clip list */
-    M4VSS3GPP_ClipContext*            pC1;                /**< Context of the current clip 1 */
-    M4VSS3GPP_ClipContext*            pC2;                /**< Context of the current clip 2 */
-
-    /**
-     * Decoder stuff */
-    M4OSA_Double                dOutputFrameDuration;    /**< [Milliseconds] directly related to
-                                                                 output frame rate */
-    M4VIFI_ImagePlane            yuv1[3];            /**< First temporary YUV420 image plane */
-    M4VIFI_ImagePlane            yuv2[3];            /**< Second temporary YUV420 image plane */
-    M4VIFI_ImagePlane            yuv3[3];            /**< Third temporary YUV420 image plane RC */
-    M4VIFI_ImagePlane            yuv4[3];            /**< Fourth temporary YUV420 image plane RC */
-
-    /**
-     * Effect stuff */
-    M4OSA_Bool                    bClip1AtBeginCut;        /**< [Milliseconds] The clip1 is at
-                                                                its begin cut */
-    M4OSA_Int8                    iClip1ActiveEffect;        /**< The index of the active effect
-                                                                    on Clip1 (<0 means none)
-                                                                    (used for video and audio but
-                                                                     not simultaneously) */
-    M4OSA_Int8                    iClip2ActiveEffect;        /**< The index of the active effect
-                                                                 on Clip2 (<0 means none)
-                                                                 (used for video and audio but
-                                                                 not simultaneously) */
-    M4OSA_Bool                    bTransitionEffect;        /**< True if the transition effect
-                                                                 must be applied at the current
-                                                                 time */
-
-    /**
-     * Encoding and Writing operations */
-    M4OSA_Bool                      bSupportSilence;    /**< Flag to know if the output stream can
-                                                             support silence (even if not editable,
-                                                              for example AAC+, but not EVRC) */
-    M4VSS3GPP_EncodeWriteContext    ewc;                /**< Audio and video encode/write stuff */
-    M4OSA_Bool                        bIsMMS;                /**< Boolean used to know if we are
-                                                                processing a file with an output
-                                                                size constraint */
-    M4OSA_UInt32                    uiMMSVideoBitrate;    /**< If in MMS mode,
-                                                                 targeted video bitrate */
-    M4VIDEOEDITING_VideoFramerate    MMSvideoFramerate;    /**< If in MMS mode,
-                                                                 targeted video framerate */
-
-    /**
-     * Filesystem functions */
-    M4OSA_FileReadPointer*        pOsaFileReadPtr;     /**< OSAL file read functions,
-                                                             to be provided by user */
-    M4OSA_FileWriterPointer*    pOsaFileWritPtr;     /**< OSAL file write functions,
-                                                             to be provided by user */
-
-    /**
-     * Interfaces of the used modules */
-    M4VSS3GPP_MediaAndCodecCtxt         ShellAPI;           /**< Filesystem and shell reader,
-                                                                 decoder functions */
-    M4OSA_Bool               bIssecondClip;
-    M4OSA_UInt8              *pActiveEffectsList1;  /**< List of the active effects settings. Array of nbEffects RC */
-    M4OSA_UInt8              nbActiveEffects1;  /**< Numbers of active effects RC */
-    M4OSA_Bool               m_bClipExternalHasStarted;  /**< Flag to indicate that an
-                                                              external effect is active */
-    M4OSA_Int32              iInOutTimeOffset;
-    M4OSA_Bool               bEncodeTillEoF;
-    M4xVSS_EditSettings      xVSS;
-    M4OSA_Context            m_air_context;
-
-    M4OSA_Bool bClip1ActiveFramingEffect; /**< Overlay flag for clip1 */
-    M4OSA_Bool bClip2ActiveFramingEffect; /**< Overlay flag for clip2, used in transition */
-} M4VSS3GPP_InternalEditContext;
-
-
-/**
- ******************************************************************************
- * structure    M4VSS3GPP_InternalAudioMixingContext
- * @brief        This structure defines the audio mixing VSS 3GPP context (private)
- * @note        This structure is used for all VSS 3GPP audio mixing operations to store
- *                the context
- ******************************************************************************
-*/
-typedef struct
-{
-    /**
-     *    VSS main variables */
-    M4VSS3GPP_AudioMixingState State;                    /**< VSS audio mixing internal state */
-
-    /**
-     * Internal copy of the input settings */
-    M4OSA_Int32                iAddCts;                 /**< [Milliseconds] Time, in milliseconds,
-                                                             at which the added audio track is
-                                                              inserted */
-    M4OSA_UInt32               uiBeginLoop;                /**< Describes in milli-second the
-                                                                start time of the loop */
-    M4OSA_UInt32               uiEndLoop;                /**< Describes in milli-second the end
-                                                            time of the loop (0 means no loop) */
-    M4OSA_Bool                 bRemoveOriginal;            /**< If true, the original audio track
-                                                                is not taken into account */
-
-    /**
-     * Input audio/video file */
-    M4VSS3GPP_ClipSettings        InputClipSettings;        /**< Structure internally used to
-                                                                 manage the input 3GPP settings */
-    M4VSS3GPP_ClipContext*        pInputClipCtxt;           /**< Context of the input 3GPP clip */
-
-    /**
-     * Added audio file stuff */
-    M4VSS3GPP_ClipSettings        AddedClipSettings;        /**< Structure internally used to
-                                                                    manage the added settings */
-    M4VSS3GPP_ClipContext*        pAddedClipCtxt;           /**< Context of the added 3GPP clip */
-
-    /**
-     * Audio stuff */
-    M4OSA_Float                    fOrigFactor;            /**< Factor to apply to the original
-                                                                audio track for the mixing */
-    M4OSA_Float                    fAddedFactor;            /**< Factor to apply to the added
-                                                                    audio track for the mixing */
-    M4OSA_Bool                  bSupportSilence;        /**< Flag to know if the output stream can
-                                                             support silence (even if not editable,
-                                                              for example AAC+, but not EVRC) */
-    M4OSA_Bool                  bHasAudio;              /**< Flag to know if we have to delete
-                                                            audio track */
-    M4OSA_Bool                  bAudioMixingIsNeeded;  /**< Flag to know if we have to do mixing */
-
-    /**
-     * Encoding and Writing operations */
-    M4VSS3GPP_EncodeWriteContext    ewc;                /**< Audio and video encode/write stuff */
-
-    /**
-     * Filesystem functions */
-    M4OSA_FileReadPointer*        pOsaFileReadPtr;     /**< OSAL file read functions,
-                                                             to be provided by user */
-    M4OSA_FileWriterPointer*    pOsaFileWritPtr;     /**< OSAL file write functions,
-                                                            to be provided by user */
-
-    /**
-     * Interfaces of the used modules */
-    M4VSS3GPP_MediaAndCodecCtxt ShellAPI;               /**< Filesystem and shell reader,
-                                                                 decoder functions */
-
-    /**
-     * Sample Rate Convertor (SSRC) stuff (needed in case of mixing with != ASF/nb of channels) */
-    M4OSA_Bool                  b_SSRCneeded;        /**< If true, SSRC is needed
-                                                            (!= ASF or nb of channels) */
-    M4OSA_UInt8                 ChannelConversion;    /**< 1=Conversion from Mono to Stereo
-                                                             2=Stereo to Mono, 0=no conversion */
-    SSRC_Instance_t             SsrcInstance;        /**< Context of the Ssrc */
-    SSRC_Scratch_t*             SsrcScratch;        /**< Working memory of the Ssrc */
-    short                       iSsrcNbSamplIn;    /**< Number of sample the Ssrc needs as input */
-    short                       iSsrcNbSamplOut;    /**< Number of sample the Ssrc outputs */
-    M4OSA_MemAddr8              pSsrcBufferIn;        /**< Input of the SSRC */
-    M4OSA_MemAddr8              pSsrcBufferOut;        /**< Output of the SSRC */
-    M4OSA_MemAddr8              pPosInSsrcBufferIn;    /**< Position into the SSRC in buffer */
-    M4OSA_MemAddr8              pPosInSsrcBufferOut;/**< Position into the SSRC out buffer */
-    M4OSA_MemAddr8              pTempBuffer;        /**< Temporary buffer */
-    M4OSA_MemAddr8              pPosInTempBuffer;    /**< Position in temporary buffer */
-    M4OSA_UInt32                minimumBufferIn;    /**< Minimum amount of decoded data to be
-                                                            processed by SSRC and channel
-                                                             convertor */
-    M4OSA_Bool                  b_DuckingNeedeed;
-    M4OSA_Int32                 InDucking_threshold;  /**< Threshold value at which background
-                                                                 music shall duck */
-    M4OSA_Float                 InDucking_lowVolume;  /**< lower the background track to this
-                                                                factor and increase the primary
-                                                                track to inverse of this factor */
-    M4OSA_Float                 lowVolume;
-    M4OSA_Int32                 audioVolumeArray[WINDOW_SIZE]; // store peak audio vol. level
-                                                                  // for duration for WINDOW_SIZE
-    M4OSA_Int32                 audVolArrIndex;
-    M4OSA_Float                 duckingFactor ;     /**< multiply by this factor to bring
-                                                             FADE IN/FADE OUT effect */
-    M4OSA_Float                 fBTVolLevel;
-    M4OSA_Float                 fPTVolLevel;
-    M4OSA_Bool                  bDoDucking;
-    M4OSA_Bool                  bLoop;
-    M4OSA_Bool                  bNoLooping;
-    M4OSA_Context              pLVAudioResampler;
-    M4OSA_Bool                  bjumpflag;
-
-} M4VSS3GPP_InternalAudioMixingContext;
-
-
-/**
- ******************************************************************************
- * structure    M4VSS3GPP_InternalExtractPictureContext
- * @brief        This structure defines the extract picture VSS context (private)
- * @note        This structure is used for all VSS picture extractions to store the context
- ******************************************************************************
-*/
-typedef struct
-{
-    /**
-     *    VSS main variables */
-    M4VSS3GPP_ExtractPictureState State;                /**< VSS extract pictureinternal state */
-
-    /**
-     * Input files */
-    M4VSS3GPP_ClipSettings        ClipSettings;            /**< Structure internally used to
-                                                                manage the input 3FPP settings */
-    M4VSS3GPP_ClipContext*        pInputClipCtxt;           /**< Context of the input 3GPP clip */
-
-    /**
-     * Settings */
-    M4OSA_Int32                    iExtractCts;            /**< [Milliseconds] Cts of the AU
-                                                                to be extracted */
-
-    /**
-     * Video stuff */
-    M4VIFI_ImagePlane            decPlanes[3];            /**< Decoded YUV420 picture plane */
-    M4OSA_UInt32                uiVideoWidth;            /**< Decoded image width */
-    M4OSA_UInt32                uiVideoHeight;            /**< Decoded image height */
-
-    /*
-     * Decoder info */
-    M4OSA_Int32                iDecCts;      /**< [Milliseconds] Decoded AU Cts */
-    M4OSA_Bool                 bJumpFlag;     /**< 1 if a jump has been made */
-    M4OSA_Int32                iDeltaTime;   /**< [Milliseconds] Time between previous RAP and
-                                                     picture to extract */
-    M4OSA_Int32                iGap;         /**< [Milliseconds] Time between jump AU and
-                                                    extraction time */
-    M4OSA_UInt32               uiStep;          /**< [Milliseconds] Progress bar time increment */
-
-    /**
-     * Filesystem functions */
-     /**< OSAL file read functions, to be provided by user */
-    M4OSA_FileReadPointer*        pOsaFileReadPtr;
-    /**< OSAL file write functions, to be provided by user */
-    M4OSA_FileWriterPointer*    pOsaFileWritPtr;
-
-    M4OSA_Bool                    bClipOpened;
-} M4VSS3GPP_InternalExtractPictureContext;
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __M4VSS3GPP_INTERNALTYPES_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4xVSS_API.h b/libvideoeditor/vss/inc/M4xVSS_API.h
deleted file mode 100755
index 5ce102f..0000000
--- a/libvideoeditor/vss/inc/M4xVSS_API.h
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4XVSS_API_H__
-#define __M4XVSS_API_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-/**
- ******************************************************************************
- * @file    M4xVSS_API.h
- * @brief    API of Video Studio 2.1
- * @note
- ******************************************************************************
-*/
-
-#define M4VSS_SUPPORT_EXTENDED_FEATURES
-
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_Extended_API.h"
-#include "M4DECODER_Common.h"
-/* Errors codes */
-
-/**
- * End of analyzing => the user can call M4xVSS_PreviewStart or M4xVSS_SaveStart */
-#define M4VSS3GPP_WAR_ANALYZING_DONE                  M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0001)
-
-/**
- * End of preview generating => the user can launch vps to see preview. Once preview is over,
-   the user must call M4xVSS_PreviewStop() to be able to save edited file, or to call another
-   M4xVSS_SendCommand() */
-#define M4VSS3GPP_WAR_PREVIEW_READY                   M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0002)
-
-/**
- * End of saved file generation => the user must call M4xVSS_SaveStop() */
-#define M4VSS3GPP_WAR_SAVING_DONE                     M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0003)
-
-/**
- * Transcoding is necessary to go further -> if the user does not want to continue,
-  he must call M4xVSS_sendCommand() */
-#define M4VSS3GPP_WAR_TRANSCODING_NECESSARY           M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0004)
-
-/**
- * In case of MMS, the output file size won't be reached */
-#define M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED           M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0005)
-
-/**
- * JPG input file dimensions are too high */
-#define M4VSS3GPP_ERR_JPG_TOO_BIG                     M4OSA_ERR_CREATE( M4_ERR, M4VS, 0x0001)
-
-/**
- * UTF Conversion, warning on the size of the temporary converted buffer*/
-#define M4xVSSWAR_BUFFER_OUT_TOO_SMALL                M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0006)
-
-/**
- * SWIKAR :Error whan NO_MORE_SPACE*/
-#define M4xVSSERR_NO_MORE_SPACE                       M4OSA_ERR_CREATE( M4_ERR, M4VS, 0x0007)
-
-/**
- ******************************************************************************
- * enum     M4xVSS_VideoEffectType
- * @brief   This enumeration defines the video effect types of the xVSS
- ******************************************************************************
-*/
-typedef enum
-{
-    M4xVSS_kVideoEffectType_BlackAndWhite = M4VSS3GPP_kVideoEffectType_External+1, /* 257 */
-    M4xVSS_kVideoEffectType_Pink,                                                  /* 258 */
-    M4xVSS_kVideoEffectType_Green,                                                 /* 259 */
-    M4xVSS_kVideoEffectType_Sepia,                                                 /* 260 */
-    M4xVSS_kVideoEffectType_Negative,                                              /* 261 */
-    M4xVSS_kVideoEffectType_Framing,                                               /* 262 */
-    M4xVSS_kVideoEffectType_Text, /* Text overlay */                               /* 263 */
-    M4xVSS_kVideoEffectType_ZoomIn,                                                /* 264 */
-    M4xVSS_kVideoEffectType_ZoomOut,                                               /* 265 */
-    M4xVSS_kVideoEffectType_Fifties,                                                /*266 */
-    M4xVSS_kVideoEffectType_ColorRGB16,                                                /*267 */
-    M4xVSS_kVideoEffectType_Gradient                                                /*268*/
-} M4xVSS_VideoEffectType;
-
-/**
- ******************************************************************************
- * enum     M4xVSS_VideoTransitionType
- * @brief   This enumeration defines the video effect that can be applied during a transition.
- ******************************************************************************
-*/
-typedef enum
-{
-    M4xVSS_kVideoTransitionType_External = M4VSS3GPP_kVideoTransitionType_External, /*256*/
-    M4xVSS_kVideoTransitionType_AlphaMagic,
-    M4xVSS_kVideoTransitionType_SlideTransition,
-    M4xVSS_kVideoTransitionType_FadeBlack
-
-} M4xVSS_VideoTransitionType;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_PreviewSettings
- * @brief    This structure gathers all the information needed by the VPS for preview
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Void                                *p3gpPreviewFile;
-    M4OSA_Void                                *pPCMFile;
-    M4VIDEOEDITING_AudioSamplingFrequency    outPCM_ASF;
-    M4OSA_Bool                                bAudioMono;
-    M4VSS3GPP_EffectSettings                   *Effects;
-    M4OSA_UInt8                                nbEffects;
-
-} M4xVSS_PreviewSettings;
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_toUTF8Fct
- * @brief        This prototype defines the function implemented by the integrator
- *                to convert a string encoded in any format to an UTF8 string.
- * @note
- *
- * @param    pBufferIn        IN            Buffer containing the string to convert to UTF8
- * @param    pBufferOut        IN            Buffer containing the UTF8 converted string
- * @param    bufferOutSize    IN/OUT    IN:     Size of the given output buffer
- *                                    OUT: Size of the converted buffer
- *
- ******************************************************************************
-*/
-typedef M4OSA_ERR (*M4xVSS_toUTF8Fct)
-(
-    M4OSA_Void            *pBufferIn,
-    M4OSA_UInt8            *pBufferOut,
-    M4OSA_UInt32        *bufferOutSize
-);
-
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_fromUTF8Fct
- * @brief        This prototype defines the function implemented by the integrator
- *                to convert an UTF8 string to a string encoded in any format.
- * @note
- *
- * @param    pBufferIn        IN            Buffer containing the UTF8 string to convert
- *                                        to the desired format.
- * @param    pBufferOut        IN            Buffer containing the converted string
- * @param    bufferOutSize    IN/OUT    IN:     Size of the given output buffer
- *                                    OUT: Size of the converted buffer
- *
- ******************************************************************************
-*/
-typedef M4OSA_ERR (*M4xVSS_fromUTF8Fct)
-(
-    M4OSA_UInt8            *pBufferIn,
-    M4OSA_Void            *pBufferOut,
-    M4OSA_UInt32        *bufferOutSize
-);
-
-
-
-
-/**
- ******************************************************************************
- * struct    M4xVSS_InitParams
- * @brief    This structure defines parameters for xVSS.
- * @note
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_FileReadPointer*            pFileReadPtr;
-    M4OSA_FileWriterPointer*        pFileWritePtr;
-    M4OSA_Void*                        pTempPath;
-    /*Function pointer on an external text conversion function */
-    M4xVSS_toUTF8Fct                pConvToUTF8Fct;
-    /*Function pointer on an external text conversion function */
-    M4xVSS_fromUTF8Fct                pConvFromUTF8Fct;
-
-
-
-} M4xVSS_InitParams;
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_Init
- * @brief        This function initializes the xVSS
- * @note        Initializes the xVSS edit operation (allocates an execution context).
- *
- * @param    pContext            (OUT) Pointer on the xVSS edit context to allocate
- * @param    params                (IN) Parameters mandatory for xVSS
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* params);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_ReduceTranscode
- * @brief        This function changes the given editing structure in order to
- *                minimize the transcoding time.
- * @note        The xVSS analyses this structure, and if needed, changes the
- *                output parameters (Video codec, video size, audio codec,
- *                audio nb of channels) to minimize the transcoding time.
- *
- * @param    pContext            (OUT) Pointer on the xVSS edit context to allocate
- * @param    pSettings            (IN) Edition settings (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_ReduceTranscode(M4OSA_Context pContext, M4VSS3GPP_EditSettings* pSettings);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_SendCommand
- * @brief        This function gives to the xVSS an editing structure
- * @note        The xVSS analyses this structure, and prepare edition
- *                This function must be called after M4xVSS_Init, after
- *                M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
- *                After this function, the user must call M4xVSS_Step until
- *                it returns another error than M4NO_ERROR.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    pSettings            (IN) Edition settings (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext, M4VSS3GPP_EditSettings* pSettings);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_PreviewStart
- * @brief        This function prepare the preview
- * @note        The xVSS create 3GP preview file and fill pPreviewSettings with
- *                preview parameters.
- *                This function must be called once M4xVSS_Step has returned
- *                M4VSS3GPP_WAR_ANALYZING_DONE
- *                After this function, the user must call M4xVSS_Step until
- *                it returns another error than M4NO_ERROR.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    pPreviewSettings    (IN) Preview settings (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_PreviewStart(M4OSA_Context pContext, M4xVSS_PreviewSettings* pPreviewSettings);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_PreviewStop
- * @brief        This function unallocate preview ressources and change xVSS
- *                internal state to allow saving or resend an editing command
- * @note        This function must be called once M4xVSS_Step has returned
- *                M4VSS3GPP_WAR_PREVIEW_READY
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_PreviewStop(M4OSA_Context pContext);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_SaveStart
- * @brief        This function prepare the save
- * @note        The xVSS create 3GP edited final file
- *                This function must be called once M4xVSS_Step has returned
- *                M4VSS3GPP_WAR_ANALYZING_DONE
- *                After this function, the user must call M4xVSS_Step until
- *                it returns another error than M4NO_ERROR.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    pFilePath            (IN) If the user wants to provide a different
- *                                output filename, else can be NULL (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Void* pFilePath,
-                            M4OSA_UInt32 filePathSize);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_SaveStop
- * @brief        This function unallocate save ressources and change xVSS
- *                internal state.
- * @note        This function must be called once M4xVSS_Step has returned
- *                M4VSS3GPP_WAR_SAVING_DONE
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_Step
- * @brief        This function executes differents tasks, depending of xVSS
- *                internal state.
- * @note        This function:
- *                    - analyses editing structure if called after M4xVSS_SendCommand
- *                    - generates preview file if called after M4xVSS_PreviewStart
- *                    - generates final edited file if called after M4xVSS_SaveStart
- *
- * @param    pContext                        (IN) Pointer on the xVSS edit context
- * @param    pContext                        (OUT) Progress indication from 0 to 100
- * @return    M4NO_ERROR:                        No error, the user must call M4xVSS_Step again
- * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:                    This function cannot not be called at this time
- * @return    M4VSS3GPP_WAR_PREVIEW_READY:    Preview file is generated
- * @return    M4VSS3GPP_WAR_SAVING_DONE:        Final edited file is generated
- * @return    M4VSS3GPP_WAR_ANALYZING_DONE:    Analyse is done
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_CloseCommand
- * @brief        This function deletes current editing profile, unallocate
- *                ressources and change xVSS internal state.
- * @note        After this function, the user can call a new M4xVSS_SendCommand
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_CleanUp
- * @brief        This function deletes all xVSS ressources
- * @note        This function must be called after M4xVSS_CloseCommand.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext);
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_GetVersion(M4_VersionInfo *pVersion)
- * @brief        This function get the version of the Video Studio 2.1
- *
- * @param    pVersion            (IN) Pointer on the version info struct
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_GetVersion(M4_VersionInfo *pVersion);
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function apply a color effect on an input YUV420 planar frame
- * @note    The prototype of this effect function is exposed because it needs to
- *            called by the VPS during the preview
- * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param    PlaneIn            (IN) Input YUV420 planar
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectColor
-(
-    M4OSA_Void *pFunctionContext,
-    M4VIFI_ImagePlane *pInputPlanes,
-    M4VIFI_ImagePlane *pOutputPlanes,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiEffectKind
-);
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function add a fixed or animated image on an input YUV420 planar frame
- * @note    The prototype of this effect function is exposed because it needs to
- *            called by the VPS during the preview
- * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param    PlaneIn            (IN) Input YUV420 planar
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming
-(
-    M4OSA_Void *pFunctionContext,
-    M4VIFI_ImagePlane *pInputPlanes,
-    M4VIFI_ImagePlane *pOutputPlanes,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiEffectKind
-);
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function make a video look as if it was taken in the fifties
- * @note
- * @param    pUserData       (IN) Context
- * @param    pPlaneIn        (IN) Input YUV420 planar
- * @param    pPlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:            No error
- * @return  M4ERR_PARAMETER:    pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties
-(
-    M4OSA_Void *pUserData,
-    M4VIFI_ImagePlane *pInputPlanes,
-    M4VIFI_ImagePlane *pPlaneOut,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiEffectKind
-);
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectZoom(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function add a fixed or animated image on an input YUV420 planar frame
- * @note    The prototype of this effect function is exposed because it needs to
- *            called by the VPS during the preview
- * @param    pFunctionContext(IN) Contains which zoom to apply (In/Out)
- * @param    PlaneIn            (IN) Input YUV420 planar
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
-*/
-M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom
-(
-    M4OSA_Void *pFunctionContext,
-    M4VIFI_ImagePlane *pInputPlanes,
-    M4VIFI_ImagePlane *pOutputPlanes,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiEffectKind
-);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_CreateClipSettings()
- * @brief    Allows filling a clip settings structure with default values
- *
- * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
- *                   pClipSettings->pFile      will be allocated in this function.
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   pFile               (IN) Clip file name
- * @param   filePathSize        (IN) Size of the clip path (needed for the UTF16 conversion)
- * @param    nbEffects           (IN) Nb of effect settings to allocate
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_CreateClipSettings(M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Void* pFile,
-                                    M4OSA_UInt32 filePathSize, M4OSA_UInt8 nbEffects);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_DuplicateClipSettings()
- * @brief    Duplicates a clip settings structure, performing allocations if required
- *
- * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_DuplicateClipSettings(M4VSS3GPP_ClipSettings *pClipSettingsDest,
-                                         M4VSS3GPP_ClipSettings *pClipSettingsOrig,
-                                         M4OSA_Bool bCopyEffects);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_FreeClipSettings()
- * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_FreeClipSettings(M4VSS3GPP_ClipSettings *pClipSettings);
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
- * @brief        This function returns the MCS context within the xVSS internal context
- * @note        This function must be called only after VSS state has moved to analyzing state
- *                or beyond
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    mcsContext        (OUT) Pointer to pointer of mcs context to return
- * @return    M4NO_ERROR:        No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext);
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
- *                                                     M4OSA_Context* mcsContext)
- * @brief        This function returns the VSS3GPP context within the xVSS internal context
- * @note        This function must be called only after VSS state has moved to Generating
- *                preview or beyond
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    vss3gppContext        (OUT) Pointer to pointer of vss3gpp context to return
- * @return    M4NO_ERROR:        No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
-*/
-M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext, M4OSA_Context* vss3gppContext);
-
-// Get supported video decoders and capabilities.
-M4OSA_ERR M4xVSS_getVideoDecoderCapabilities(M4DECODER_VideoDecoders **decoders);
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-#endif /* __M4XVSS_API_H__ */
-
diff --git a/libvideoeditor/vss/inc/M4xVSS_Internal.h b/libvideoeditor/vss/inc/M4xVSS_Internal.h
deleted file mode 100755
index 5296572..0000000
--- a/libvideoeditor/vss/inc/M4xVSS_Internal.h
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __M4XVSS_INTERNAL_H__
-#define __M4XVSS_INTERNAL_H__
-
-/**
- ******************************************************************************
- * @file    M4xVSS_Internal.h
- * @brief    Internal of Video Authoring.
- * @note
- ******************************************************************************
-*/
-
-#include "NXPSW_CompilerSwitches.h"
-
-#include "M4MCS_API.h"
-#include "M4MCS_ErrorCodes.h"
-
-#include "M4PTO3GPP_API.h"
-#include "M4PTO3GPP_ErrorCodes.h"
-
-#include "M4AIR_API.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define M4_xVSS_MAJOR        1
-#define M4_xVSS_MINOR        5
-#define M4_xVSS_REVISION    5
-
-/* The following defines describe the max dimensions of an input JPG */
-#define M4XVSS_MX_JPG_NB_OF_PIXELS    3926016
-
-/*Size of the UTF temporary conversion buffer keep in the VA internal context and
-allocate at the initialization*/
-#define UTF_CONVERSION_BUFFER_SIZE            2048
-
-/* Max path length size */
-#define  M4XVSS_MAX_PATH_LEN 256
-
-/** Determine absolute value of a. */
-#define M4xVSS_ABS(a)               ( ( (a) < (0) ) ? (-(a)) : (a) )
-
-/** Y,U,V values in case of black borders rendering */
-#define Y_PLANE_BORDER_VALUE    0x00
-#define U_PLANE_BORDER_VALUE    0x80
-#define V_PLANE_BORDER_VALUE    0x80
-
-/**
- ******************************************************************************
- * struct    M4xVSS_EffectsAlphaBlending
- * @brief    Internal effects alpha blending parameters
- * @note    This structure contains all internal informations to create an alpha
- *            blending for the effects text and framing
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt8                    m_fadeInTime;        /*Start percentage of Alpha blending*/
-    M4OSA_UInt8                    m_fadeOutTime;        /*Middle percentage of Alpha blending*/
-    M4OSA_UInt8                    m_end;            /*End percentage of Alpha blending*/
-    M4OSA_UInt8                    m_middle;    /*Duration, in percentage of effect duration,
-                                                 of the FadeIn phase*/
-    M4OSA_UInt8                    m_start;    /*Duration, in percentage of effect duration,
-                                                of the FadeOut phase*/
-
-} M4xVSS_internalEffectsAlphaBlending;
-
-/**
- ******************************************************************************
- * THIS STRUCTURE MUST NOT BE MODIFIED
- * struct    M4xVSS_FramingStruct
- * @brief    It is used internally by xVSS for framing effect, and by VPS for previewing
- ******************************************************************************
-*/
-typedef struct
-{
-    M4VIFI_ImagePlane *FramingRgb;                /**< decoded BGR565 plane */
-    M4VIFI_ImagePlane *FramingYuv;                /**< converted YUV420 planar plane */
-    M4OSA_Int32 duration;                        /**< Duration of the frame */
-    M4OSA_Int32 previousClipTime;                /**< Previous clip time, used by framing
-                                                     filter for SAVING */
-    M4OSA_Int32 previewOffsetClipTime;            /**< Previous clip time, used by framing
-                                                     filter for PREVIEW */
-    M4OSA_Int32 previewClipTime;                /**< Current clip time, used by framing
-                                                     filter for PREVIEW */
-    M4OSA_Void* pCurrent;                        /**< Current M4xVSS_FramingStruct used by
-                                                         framing filter */
-    M4OSA_Void* pNext;                            /**< Next M4xVSS_FramingStruct, if no more,
-                                                         point on current M4xVSS_FramingStruct */
-    M4OSA_UInt32 topleft_x;                        /**< The top-left X coordinate in the output
-                                                         picture of the first decoded pixel */
-    M4OSA_UInt32 topleft_y;                        /**< The top-left Y coordinate in the output
-                                                         picture of the first decoded pixel */
-    M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct; /* Alpha blending Struct */
-/*To support ARGB8888 : get the width and height in case of file ARGB888 used in framing
- as video effect */
-    M4OSA_UInt32                width;   /*width of the ARGB8888 clip
-                                        .Used only if video effect is framming */
-    M4OSA_UInt32                height; /*height of the ARGB8888 clip .
-                                        Used only if video effect is framming */
-
-} M4xVSS_FramingStruct;
-
-#ifdef DECODE_GIF_ON_SAVING
-/**
- ******************************************************************************
- * THIS STRUCTURE MUST NOT BE MODIFIED
- * struct    M4xVSS_FramingContext
- * @brief    It is used internally by xVSS for framing effect, when the flag
-                DECODE_GIF_ON_SAVING is activated
- ******************************************************************************
-*/
-typedef struct
-{
-    M4xVSS_FramingStruct*            aFramingCtx;        /**<Framing struct for the decoding
-                                                            of the current frame of the gif*/
-    M4xVSS_FramingStruct*            aFramingCtx_last;    /**<Framing struct for the decoding of
-                                                             the previous frame of the gif*/
-    M4OSA_FileReadPointer*            pFileReadPtr;    /**< Pointer on OSAL file read functions */
-    M4OSA_FileWriterPointer*        pFileWritePtr;     /**< Pointer on OSAL file write functions */
-    M4OSA_Void*                        pSPSContext;        /**<SPS context for the GIF decoding*/
-    //M4SPS_Stream                    inputStream;        /**<GIF input stream buffer pointer*/
-    M4OSA_Void*                        pEffectFilePath;    /**<file path of the gif*/
-    M4VIDEOEDITING_VideoFrameSize    outputVideoSize;    /**< Output video size RC */
-    //M4SPS_DisposalMode                disposal;            /**<previous frame GIF disposal*/
-    M4OSA_UInt16                    b_animated;            /**<Is the GIF animated?*/
-    M4OSA_Bool                        bEffectResize;        /**<Is the gif resize*/
-    M4OSA_UInt32                    topleft_x;            /**< The top-left X coordinate in the
-                                                                 output picture of the first
-                                                                 decoded pixel */
-    M4OSA_UInt32                    topleft_y;            /**< The top-left Y coordinate in the
-                                                                 output picture of the first
-                                                                 decoded pixel */
-    M4OSA_UInt32                    width;                /**<GIF width, fill during the
-                                                                initialization with the SPS*/
-    M4OSA_UInt32                    height;                /**<GIF height, fill during the
-                                                                 initialization with the SPS*/
-    M4OSA_UInt32                    effectDuration;        /**<Effect duration*/
-    M4OSA_Int32                        effectStartTime;    /**<Effect start time*/
-    M4OSA_UInt32                    clipTime;            /**<current output clip time for the
-                                                                current frame*/
-    M4OSA_UInt32                    last_clipTime;        /**<previous output clip time for the
-                                                                previous frame*/
-    M4OSA_UInt32                    lastStepDuration;    /**<Time interval between the previous
-                                                             frame and the current frame*/
-    M4OSA_Bool                        b_IsFileGif;        /**<Is the framing using a gif file*/
-    M4OSA_UInt32                    last_width;            /**<Last frame width*/
-    M4OSA_UInt32                    last_height;        /**<Last frame height*/
-    M4OSA_UInt32                    last_topleft_x;        /**<Last frame x topleft*/
-    M4OSA_UInt32                    last_topleft_y;        /**<Last frame y topleft*/
-    M4OSA_UInt32                    current_gif_time;    /**< Current time os the GIF in output
-                                                              file time */
-    M4OSA_Float                        frameDurationRatio;    /**< Frame duration ratio */
-    M4xVSS_internalEffectsAlphaBlending*    alphaBlendingStruct;/*Alpha blending structure*/
-#ifdef DEBUG_GIF
-    M4OSA_UInt8                        uiDebug_fileCounter;/**<for debug purpose,
-                                                                 count the frame of the gif*/
-#endif /*DEBUG_GIF*/
-}M4xVSS_FramingContext;
-#endif /*DECODE_GIF_ON_SAVING*/
-
-/**
- ******************************************************************************
- * struct    M4xVSS_Pto3GPP_params
- * @brief    Internal xVSS parameter for Pto3GPP module
- * @note    This structure is filled by M4xVSS_sendCommand function,
- * @note    and is used during M4xVSS_Step function to initialize Pto3GPP module
- * @note    All the JPG files to transform to 3GP are chained
- ******************************************************************************
-*/
-typedef struct {
-    M4OSA_Char*                        pFileIn;
-    M4OSA_Char*                        pFileOut;
-    M4OSA_Char*                        pFileTemp;            /**< temporary file used for
-                                                                 metadata writing, NULL is cstmem
-                                                                 writer not used */
-    M4OSA_UInt32                    duration;
-    M4VIDEOEDITING_FileType            InputFileType;
-    M4OSA_Bool                        isCreated;            /**< This boolean is used to know if
-                                                                    the output file is already
-                                                                    created or not */
-    M4OSA_Bool                        isPanZoom;            /**< RC: Boolean used to know if the
-                                                                pan and zoom mode is enabled */
-    M4OSA_UInt16                    PanZoomXa;            /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftXa;    /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftYa;    /**< RC */
-    M4OSA_UInt16                    PanZoomXb;            /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftXb;    /**< RC */
-    M4OSA_UInt16                    PanZoomTopleftYb;    /**< RC */
-    M4xVSS_MediaRendering            MediaRendering;        /**< FB: to render or not picture
-                                                                aspect ratio */
-    M4VIDEOEDITING_VideoFramerate    framerate;            /**< RC */
-    M4OSA_Void*                pNext;                /**< Address of next M4xVSS_Pto3GPP_params*
-                                                             element */
-    /*To support ARGB8888:width and height */
-    M4OSA_UInt32            width;
-    M4OSA_UInt32             height;
-
-} M4xVSS_Pto3GPP_params;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_fiftiesStruct
- * @brief    It is used internally by xVSS for fifties effect
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt32 fiftiesEffectDuration;    /**< Duration of the same effect in a video */
-    M4OSA_Int32 previousClipTime;          /**< Previous clip time, used by framing filter
-                                                for SAVING */
-    M4OSA_UInt32 shiftRandomValue;                /**< Vertical shift of the image */
-      M4OSA_UInt32 stripeRandomValue;                /**< Horizontal position of the stripe */
-
-} M4xVSS_FiftiesStruct;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_ColorRGB16
- * @brief    It is used internally by xVSS for RGB16 color effect
- ******************************************************************************
-*/
-typedef struct
-{
-    M4xVSS_VideoEffectType colorEffectType;    /*Color type of effect*/
-    M4OSA_UInt16    rgb16ColorData;            /*RGB16 color only for the RGB16 color effect*/
-} M4xVSS_ColorStruct;
-
-
-/**
- ******************************************************************************
- * struct    M4xVSS_PictureCallbackCtxt
- * @brief    The Callback Context parameters for Pto3GPP
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_Char*                m_FileIn;
-    M4OSA_UInt32            m_NbImage;
-    M4OSA_UInt32            m_ImageCounter;
-    M4OSA_Double            m_timeDuration;
-    M4OSA_FileReadPointer*  m_pFileReadPtr;
-    M4VIFI_ImagePlane*        m_pDecodedPlane; /* Used for Pan and Zoom only */
-    M4xVSS_Pto3GPP_params*    m_pPto3GPPparams;
-    M4OSA_Context            m_air_context;
-    M4xVSS_MediaRendering    m_mediaRendering;
-
-} M4xVSS_PictureCallbackCtxt;
-
-/**
- ******************************************************************************
- * enum        M4xVSS_State
- * @brief    Internal State of the xVSS
- ******************************************************************************
-*/
-typedef enum
-{
-    M4xVSS_kStateInitialized = 0,
-    M4xVSS_kStateAnalyzing,
-    M4xVSS_kStateOpened,
-    //M4xVSS_kStateGeneratingPreview,
-    //M4xVSS_kStatePreview,
-    M4xVSS_kStateSaving,
-    M4xVSS_kStateSaved
-
-} M4xVSS_State;
-
-/**
- ******************************************************************************
- * enum        M4xVSS_editMicroState
- * @brief    Internal Micro state of the xVSS for previewing/saving states
- ******************************************************************************
-*/
-typedef enum
-{
-    M4xVSS_kMicroStateEditing = 0,
-    M4xVSS_kMicroStateAudioMixing
-
-} M4xVSS_editMicroState;
-
-/**
- ******************************************************************************
- * enum        M4xVSS_editMicroState
- * @brief    Internal Micro state of the xVSS for analyzing states
- ******************************************************************************
-*/
-typedef enum
-{
-    M4xVSS_kMicroStateAnalysePto3GPP = 0,
-    M4xVSS_kMicroStateConvertPto3GPP,
-    M4xVSS_kMicroStateAnalyzeMCS,
-    M4xVSS_kMicroStateTranscodeMCS
-
-} M4xVSS_analyseMicroState;
-
-
-/**
- ******************************************************************************
- * struct    M4xVSS_MCS_params
- * @brief    Internal xVSS parameter for MCS module
- * @note    This structure is filled by M4xVSS_sendCommand function,
- * @note    and is used during M4xVSS_Step function to initialize MCS module
- * @note    All the input files to transcode are chained
- ******************************************************************************
-*/
-typedef struct {
-    M4OSA_Void*                                pFileIn;
-    M4OSA_Void*                                pFileOut;
-    /**< temporary file used for metadata writing, NULL is cstmem writer not used */
-    M4OSA_Void*                             pFileTemp;
-    M4VIDEOEDITING_FileType                    InputFileType;
-    M4VIDEOEDITING_FileType                    OutputFileType;
-    M4VIDEOEDITING_VideoFormat                OutputVideoFormat;
-    M4VIDEOEDITING_VideoFrameSize            OutputVideoFrameSize;
-    M4VIDEOEDITING_VideoFramerate            OutputVideoFrameRate;
-    M4VIDEOEDITING_AudioFormat                OutputAudioFormat;
-    M4VIDEOEDITING_AudioSamplingFrequency    OutputAudioSamplingFrequency;
-    M4OSA_Bool                                bAudioMono;
-    M4VIDEOEDITING_Bitrate                    OutputVideoBitrate;
-    M4VIDEOEDITING_Bitrate                    OutputAudioBitrate;
-    M4OSA_Bool                                isBGM;
-    /**< This boolean is used to know if the output file is already created or not */
-    M4OSA_Bool                                isCreated;
-    /**< Address of next M4xVSS_MCS_params* element */
-    M4OSA_Void*                                pNext;
-
-    /*FB: transcoding per parts*/
-    M4OSA_UInt32                         BeginCutTime;    /**< Beginning cut time in input file */
-    M4OSA_UInt32                         EndCutTime;      /**< End cut time in input file */
-    M4OSA_UInt32                         OutputVideoTimescale;    /*Output timescale*/
-
-    M4MCS_MediaRendering                 MediaRendering;   /**< FB: to crop, resize, or render
-                                                                black borders*/
-    M4OSA_UInt32                         videoclipnumber;
-    M4OSA_UInt32  outputVideoProfile;
-    M4OSA_UInt32  outputVideoLevel;
-} M4xVSS_MCS_params;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_internal_AlphaMagicSettings
- * @brief    This structure defines the alpha magic transition settings
- ******************************************************************************
-*/
-typedef struct {
-    M4VIFI_ImagePlane    *pPlane;
-    M4OSA_Int32         blendingthreshold;    /**< Blending Range */
-    M4OSA_Bool            isreverse;            /**< direct effect or reverse */
-
-} M4xVSS_internal_AlphaMagicSettings;
-
-
-/**
- ******************************************************************************
- * struct    M4xVSS_internal_SlideTransitionSettings
- * @brief    This structure defines the internal slide transition settings
- * @note    This type happens to match the external transition settings
- *            structure (i.e. the one which is given by the application), but are
- *            conceptually different types, so that if (or rather when) some day
- *            translation needs to occur when loading the settings from the app,
- *            this separate type will already be ready.
- ******************************************************************************
-*/
-
-typedef M4xVSS_SlideTransitionSettings    M4xVSS_internal_SlideTransitionSettings;
-
-/**
- ******************************************************************************
- * struct    M4xVSS_internalJpegChunkMode
- * @brief    This structure defines the parameters of the chunk callback to decode
- *            a JPEG by chunk mode.
- ******************************************************************************
-*/
-
-/**
- ******************************************************************************
- * struct    M4xVSS_UTFConversionContext
- * @brief    Internal UTF conversion context
- * @note    This structure contains the UTF conversion informations
- *            needed by the xVSS to manage the different formats (UTF8/16/ASCII)
- ******************************************************************************
-*/
-typedef struct
-{
-    /*Function pointer on an external text conversion function */
-    M4xVSS_toUTF8Fct                pConvToUTF8Fct;
-    /*Function pointer on an external text conversion function */
-    M4xVSS_fromUTF8Fct                pConvFromUTF8Fct;
-    /*Temporary buffer that contains the result of each conversion*/
-    M4OSA_Void*                        pTempOutConversionBuffer;
-    /*Size of the previous buffer, the size is prederminated*/
-    M4OSA_UInt32                    m_TempOutConversionSize;
-} M4xVSS_UTFConversionContext;
-
-
-
-/**
- ******************************************************************************
- * struct    M4xVSS_Context
- * @brief    Internal context of the xVSS
- * @note    This structure contains all internal informations needed by the xVSS
- ******************************************************************************
-*/
-typedef struct {
-    /**< Pointer on OSAL file read functions */
-    M4OSA_FileReadPointer*            pFileReadPtr;
-    /**< Pointer on OSAL file write functions */
-    M4OSA_FileWriterPointer*        pFileWritePtr;
-    /**< Local copy of video editor settings */
-    M4VSS3GPP_EditSettings*            pSettings;
-    /**< Current Settings of video editor to use in step functions for preview/save */
-    M4VSS3GPP_EditSettings*            pCurrentEditSettings;
-    /**< Current context of video editor to use in step functions for preview/save */
-    M4VSS3GPP_EditContext            pCurrentEditContext;
-    /**< This is to know if a previous M4xVSS_sendCommand has already been called */
-    M4OSA_UInt8                        previousClipNumber;
-    /**< Audio mixing settings, needed to free it in M4xVSS_internalCloseAudioMixedFile function*/
-    M4VSS3GPP_AudioMixingSettings*    pAudioMixSettings;
-    /**< Audio mixing context */
-    M4VSS3GPP_AudioMixingContext    pAudioMixContext;
-    /**< File path for PCM output file: used for preview, given to user */
-    M4OSA_Char*                        pcmPreviewFile;
-    /**< Duplication of output file pointer, to be able to use audio mixing */
-    M4OSA_Char*                        pOutputFile;
-    /**< Duplication of temporary file pointer, to be able to use audio mixing */
-    M4OSA_Char*                        pTemporaryFile;
-    /**< Micro state for Saving/Previewing state */
-    M4xVSS_editMicroState            editingStep;
-    /**< Micro state for Analyzing state */
-    M4xVSS_analyseMicroState        analyseStep;
-    /**< Nb of step for analysis or save/preview. Used to compute progression
-         of analysis or save/preview */
-    M4OSA_UInt8                        nbStepTotal;
-    /**< Current step number for analysis or save/preview */
-    M4OSA_UInt8                        currentStep;
-    /**< To be able to free pEffects during preview close */
-    M4xVSS_PreviewSettings*            pPreviewSettings;
-    /**< Temporary file path: all temporary files are created here */
-    M4OSA_Char*                        pTempPath;
-    /**< Current state of xVSS */
-    M4xVSS_State                    m_state;
-    /**< List of still pictures input to convert to 3GP with parameters */
-    M4xVSS_Pto3GPP_params*            pPTo3GPPparamsList;
-    /**< Current element of the above chained list beeing processd by the Pto3GPP */
-    M4xVSS_Pto3GPP_params*            pPTo3GPPcurrentParams;
-    /**< Current Pto3GPP context, needed to call Pto3GPP_step function in M4xVSS_step function */
-    M4PTO3GPP_Context                pM4PTO3GPP_Ctxt;
-    /**< Pointer on the callback function of the Pto3GPP module */
-    M4xVSS_PictureCallbackCtxt*        pCallBackCtxt;
-    /**< List of files to transcode with parameters */
-    M4xVSS_MCS_params*                pMCSparamsList;
-    /**< Current element of the above chained list beeing processd by the MCS */
-    M4xVSS_MCS_params*                pMCScurrentParams;
-    /**< Current MCS context, needed to call MCS_step function in M4xVSS_step function*/
-    M4MCS_Context                    pMCS_Ctxt;
-    /**< Index to have unique temporary filename */
-    M4OSA_UInt32                    tempFileIndex;
-    /**< In case of MMS use case, targeted bitrate to reach output file size */
-    M4OSA_UInt32                    targetedBitrate;
-    /**< If the sendCommand fct is called twice or more, the first computed timescale
-        recorded here must be reused */
-    M4OSA_UInt32                    targetedTimescale;
-
-    /*UTF Conversion support*/
-    M4xVSS_UTFConversionContext    UTFConversionContext;    /*UTF conversion context structure*/
-
-} M4xVSS_Context;
-
-/**
- * Internal function prototypes */
-
-M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext,
-                                          M4OSA_UInt32 *rotationDegree);
-
-M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalDecodeJPG(M4OSA_Void* pFileIn, M4OSA_FileReadPointer* pFileReadPtr,
-                                   M4VIFI_ImagePlane** pImagePlanes);
-
-M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
-                                                 M4OSA_FileReadPointer* pFileReadPtr,
-                                                 M4VIFI_ImagePlane** pImagePlanes,
-                                                 M4OSA_UInt32 width,M4OSA_UInt32 height);
-M4OSA_ERR M4xVSS_internalDecodeAndResizeJPG(M4OSA_Void* pFileIn,
-                                            M4OSA_FileReadPointer* pFileReadPtr,
-                                            M4VIFI_ImagePlane* pImagePlanes);
-M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
-                                                          M4OSA_FileReadPointer* pFileReadPtr,
-                                                          M4VIFI_ImagePlane* pImagePlanes,
-                                                          M4OSA_UInt32 width,M4OSA_UInt32 height);
-
-M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx);
-
-#ifdef DECODE_GIF_ON_SAVING
-M4OSA_ERR M4xVSS_internalDecodeGIF(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalDecodeGIF_Initialization(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalDecodeGIF_Cleaning(M4OSA_Context pContext);
-
-#else
-M4OSA_ERR M4xVSS_internalDecodeGIF(M4OSA_Context pContext, M4VSS3GPP_EffectSettings* pEffect,
-                                   M4xVSS_FramingStruct* framingCtx);
-#endif /*DECODE_GIF_ON_SAVING*/
-
-M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
-                                                               M4VSS3GPP_EffectSettings* pEffect,
-                                                               M4xVSS_FramingStruct* framingCtx,
-                                                               M4VIDEOEDITING_VideoFrameSize \
-                                                                    OutputVideoResolution);
-
-M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings);
-
-M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext);
-
-M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
-                                         M4VIDEOEDITING_ClipProperties *pFileProperties);
-
-M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                             M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
-                             M4VSS3GPP_ExternalProgress *pProgress,
-                             M4OSA_UInt32 uiTransitionKind);
-
-M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                                     M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
-                                     M4VSS3GPP_ExternalProgress *pProgress,
-                                     M4OSA_UInt32 uiTransitionKind);
-
-M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                                  M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
-                                  M4VSS3GPP_ExternalProgress *pProgress,
-                                  M4OSA_UInt32 uiTransitionKind);
-
-M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                                     M4VIFI_ImagePlane PlaneIn2[3],M4VIFI_ImagePlane *PlaneOut,
-                                     M4VSS3GPP_ExternalProgress *pProgress,
-                                     M4OSA_UInt32 uiTransitionKind);
-
-M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
-                                              M4VSS3GPP_EditSettings* pSettings,
-                                              M4OSA_UInt32* pTargetedTimeScale);
-
-M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
-                                       M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize);
-
-
-M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
-                                         M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize);
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-
-#endif /* __M4XVSS_INTERNAL_H__ */
-
diff --git a/libvideoeditor/vss/mcs/Android.mk b/libvideoeditor/vss/mcs/Android.mk
deleted file mode 100755
index 5053e7d..0000000
--- a/libvideoeditor/vss/mcs/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_API.h b/libvideoeditor/vss/mcs/inc/M4MCS_API.h
deleted file mode 100755
index a8987e2..0000000
--- a/libvideoeditor/vss/mcs/inc/M4MCS_API.h
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file    M4MCS_API.h
- * @brief   Media Conversion Service public API.
- * @note    MCS allows transcoding a 3gp/mp4 file into a new 3gp/mp4 file changing the
- *          video and audio encoding settings.
- *          It is a straightforward and fully synchronous API.
- ******************************************************************************
- */
-
-#ifndef __M4MCS_API_H__
-#define __M4MCS_API_H__
-
-/**
- *    OSAL basic types and errors */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/**
- *    OSAL types for file access */
-#include "M4OSA_FileReader.h"
-#include "M4OSA_FileWriter.h"
-
-/**
- *    Definition of M4_VersionInfo */
-#include "M4TOOL_VersionInfo.h"
-
-/**
- * Common definitions of video editing components */
-#include "M4_VideoEditingCommon.h"
-
-/**
- * To enable external audio codecs registering*/
-#include "M4AD_Common.h"
-#include "M4ENCODER_AudioCommon.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- *    Public type of the MCS context */
-typedef M4OSA_Void* M4MCS_Context;
-
-
-/**
- ******************************************************************************
- * enum        M4MCS_MediaRendering
- * @brief    This enum defines different media rendering
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kResizing = 0,    /**< The media is resized, the aspect ratio can be
-                              different from the original one.
-                              All of the media is rendered */
-    M4MCS_kCropping,        /**< The media is cropped, the aspect ratio is the
-                              same as the original one.
-                              The media is not rendered entirely */
-    M4MCS_kBlackBorders     /**< Black borders are rendered in order to keep the
-                              original aspect ratio. All the media is rendered */
-} M4MCS_MediaRendering;
-
-
-/**
- ******************************************************************************
- * struct   M4MCS_ExternalProgress
- * @brief   This structure contains information provided to the external Effect functions
- * @note    The uiProgress value should be enough for most cases
- ******************************************************************************
- */
-typedef struct
-{
-    M4OSA_UInt32    uiProgress;     /**< Progress of the Effect from 0 to 1000 (one thousand) */
-    M4OSA_UInt32    uiClipTime;     /**< Current time, in milliseconds,
-                                          in the current clip time-line */
-    M4OSA_UInt32    uiOutputTime;   /**< Current time, in milliseconds,
-                                          in the output clip time-line */
-
-} M4MCS_ExternalProgress;
-
-
-/**
- ******************************************************************************
- * enum     M4MCS_AudioEffectType
- * @brief   This enumeration defines the audio effect types of the MCS
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kAudioEffectType_None    = 0,
-    M4MCS_kAudioEffectType_FadeIn  = 8, /**< Intended for begin effect */
-    M4MCS_kAudioEffectType_FadeOut = 16, /**< Intended for end effect */
-    M4MCS_kAudioEffectType_External = 256
-
-} M4MCS_AudioEffectType;
-
-
-/**
- ******************************************************************************
- * prototype    M4MCS_editAudioEffectFct
- * @brief       Audio effect functions implemented by the integrator
- *              must match this prototype.
- * @note        The function is provided with the original PCM data buffer and its size.
- *              Audio effect have to be applied on it.
- *              The progress of the effect is given, on a scale from 0 to 1000.
- *              When the effect function is called, all the buffers are valid and
- *              owned by the MCS.
- *
- * @param   pFunctionContext    (IN) The function context, previously set by the integrator
- * @param   pPCMdata            (IN/OUT) valid PCM data buffer
- * @param   uiPCMsize           (IN/OUT) PCM data buffer corresponding size
- * @param   pProgress           (IN) Set of information about the audio effect progress.
- *
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-typedef M4OSA_ERR (*M4MCS_editAudioEffectFct)
-(
-    M4OSA_Void *pFunctionContext,
-    M4OSA_Int16 *pPCMdata,
-    M4OSA_UInt32 uiPCMsize,
-    M4MCS_ExternalProgress *pProgress
-);
-
-
-/**
- ******************************************************************************
- * struct   M4MCS_EffectSettings
- * @brief   This structure defines an audio effect for the edition.
- ******************************************************************************
- */
-typedef struct
-{
-    M4OSA_UInt32                 uiStartTime;              /**< In ms */
-    M4OSA_UInt32                 uiDuration;               /**< In ms */
-    M4MCS_editAudioEffectFct     ExtAudioEffectFct;        /**< External effect function */
-    M4OSA_Void                   *pExtAudioEffectFctCtxt;  /**< Context given to the external
-                                                                effect function */
-    M4MCS_AudioEffectType        AudioEffectType;         /**< None, FadeIn, FadeOut */
-
-} M4MCS_EffectSettings;
-
-
-/**
- ******************************************************************************
- * struct    M4MCS_OutputParams
- * @brief    MCS Output parameters
- * @note     Following parameters are used for still picture inputs :
- *             - OutputFileType (must be set to M4VIDEOEDITING_kFileType_JPG)
- *             - bDiscardExif must be set to M4OSA_TRUE or M4OSA_FALSE
- *             - bAdjustOrientation must be set to M4OSA_TRUE or M4OSA_FALSE
- *             - (MediaRendering is not handled : output image resolution is always
-                 set according to BestFit criteria)
- *            bDiscardExif and bAdjustOrientation are still picture only parameters
- ******************************************************************************
- */
-typedef struct
-{
-    /**< Format of the output file */
-    M4VIDEOEDITING_FileType                 OutputFileType;
-    /**< Output video compression format, see enum */
-    M4VIDEOEDITING_VideoFormat              OutputVideoFormat;
-    /**< Output frame size : QQVGA, QCIF or SQCIF */
-    M4VIDEOEDITING_VideoFrameSize           OutputVideoFrameSize;
-    /**< Targeted Output framerate, see enum */
-    M4VIDEOEDITING_VideoFramerate           OutputVideoFrameRate;
-    /**< Format of the audio in the stream */
-    M4VIDEOEDITING_AudioFormat              OutputAudioFormat;
-    /**< Sampling frequency of the audio in the stream */
-    M4VIDEOEDITING_AudioSamplingFrequency   OutputAudioSamplingFrequency;
-    /**< Set to M4OSA_TRUE if the output audio is mono */
-    M4OSA_Bool                              bAudioMono;
-    /**< Output PCM file if not NULL */
-    M4OSA_Char                              *pOutputPCMfile;
-    /**< To crop, resize, or render black borders*/
-    M4MCS_MediaRendering                    MediaRendering;
-    /**< List of effects */
-    M4MCS_EffectSettings                    *pEffects;
-    /**< Number of effects in the above list */
-    M4OSA_UInt8                             nbEffects;
-
-    /*--- STILL PICTURE ---*/
-    /**< TRUE: Even if the input file contains an EXIF section,
-    the output file won't contain any EXIF section.*/
-    M4OSA_Bool                              bDiscardExif ;
-
-    /**< =TRUE : picture must be rotated if Exif tags hold a rotation info
-    (and rotation info is set to 0)*/
-    M4OSA_Bool                              bAdjustOrientation ;
-    /*--- STILL PICTURE ---*/
-    M4OSA_Int32 outputVideoProfile;
-    M4OSA_Int32 outputVideoLevel;
-} M4MCS_OutputParams;
-
-/*--- STILL PICTURE ---*/
-/**
- ******************************************************************************
- * enum      M4MCS_SPOutputResolution
- * @brief    Still picture specific : MCS output targeted file resolution
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kResSameAsInput       = 0x00, /*width x height*/
-    M4MCS_kResQVGA              = 0x01, /*320x240*/
-    M4MCS_kResVGA               = 0x02, /*640x480*/
-    M4MCS_kResWQVGA             = 0x03, /*400x240*/
-    M4MCS_kResWVGA              = 0x04, /*800x480*/
-    M4MCS_kResXGA               = 0x05, /*1024x768*/
-    M4MCS_kResCustom            = 0xFF  /*Size is set via StillPictureCustomWidth/Height*/
-} M4MCS_SPOutputResolution ;
-
-
-/**
- ******************************************************************************
- * enum      M4MCS_SPStrategy
- * @brief    Still picture specific : MCS strategy to configure the encoding parameters
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kFileSizeOnlyFixed            = 0x00, /*StillPictureResolution and
-                                                 QualityFactor are ignored*/
-    M4MCS_kFileSizeAndResFixed          = 0x01, /*QualityFactor is ignored*/
-    M4MCS_kQualityAndResFixed           = 0x02  /*OutputFileSize is ignored*/
-} M4MCS_SPStrategy ;
-
-
-/**
- ******************************************************************************
- * enum      M4MCS_SPCrop
- * @brief    Still picture specific : indicate whether cropping should be done
-                                     before changing the resolution
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kNoCrop                = 0x00, /*No Cropping is performed*/
-    M4MCS_kCropBeforeResize      = 0x01  /*Input image is cropped (before changing resolution)*/
-} M4MCS_SPCrop ;
-
-
-/**
- ******************************************************************************
- * struct    M4MCS_EncodingParams
- * @brief    MCS file size, bitrate and cut parameters
- * @note     Following parameters are used for still picture inputs :
- *             - OutputFileSize
- *             - StillPictureResolution
- *             - QualityFactor
- *             - StillPictureStrategy
- *             - StillPictureCustomWidth/Height (if StillPictureResolution==M4MCS_kResCustom)
- *            Still picture only parameters : StillPictureResolution, QualityFactor,
- *            StillPictureStrategy and StillPictureCustomWidth/Height
- ******************************************************************************
- */
-typedef struct
-{
-    M4VIDEOEDITING_Bitrate    OutputVideoBitrate;     /**< Targeted video bitrate */
-    M4VIDEOEDITING_Bitrate    OutputAudioBitrate;     /**< Targeted audio bitrate */
-    M4OSA_UInt32              BeginCutTime;           /**< Beginning cut time in input file */
-    M4OSA_UInt32              EndCutTime;             /**< End cut time in input file */
-    M4OSA_UInt32              OutputFileSize;         /**< Expected resulting file size */
-    M4OSA_UInt32              OutputVideoTimescale;   /**< Optional parameter used to fix a
-                                                           timescale during transcoding */
-
-    /*--- STILL PICTURE ---*/
-    M4OSA_Int32               QualityFactor ;         /**< =-1 (undefined) or 0(lowest)..
-                                                            50(best) : This parameter is the
-                                                            quality indication for the JPEG output
-                                                            file (if =-1 the MCS will set quality
-                                                            automatically)*/
-    M4MCS_SPStrategy            StillPictureStrategy ; /**< Defines which input parameters
-                                                            will be taken into account by MCS*/
-    M4MCS_SPOutputResolution    StillPictureResolution;/**< Desired output resolution for
-                                                            a still picture file */
-    /**< (only if Resolution==M4MCS_kResCustom) : Custom output image width */
-    M4OSA_UInt32                StillPictureCustomWidth;
-    /**< (only if Resolution==M4MCS_kResCustom) : Custom output image height */
-    M4OSA_UInt32                StillPictureCustomHeight;
-    /**< Indicate whether Crop should be performed */
-    M4MCS_SPCrop                StillPictureCrop;
-    /**< (only if cropping) X coordinate of topleft corner of the crop window */
-    M4OSA_UInt32                StillPictureCrop_X;
-    /**< (only if cropping) Y coordinate of topleft corner of the crop window */
-    M4OSA_UInt32                StillPictureCrop_Y;
-    /**< (only if cropping) Width of the crop window (in pixels) */
-    M4OSA_UInt32                StillPictureCrop_W;
-    /**< (only if cropping) Height of the crop window (in pixels) */
-    M4OSA_UInt32                StillPictureCrop_H;
-    /*--- STILL PICTURE ---*/
-} M4MCS_EncodingParams;
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
- * @brief    Get the MCS version.
- * @note Can be called anytime. Do not need any context.
- * @param    pVersionInfo        (OUT) Pointer to a version info structure
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
-                        M4OSA_FileWriterPointer* pFileWritePtrFct);
- * @brief    Initializes the MCS (allocates an execution context).
- * @note
- * @param    pContext            (OUT) Pointer on the MCS context to allocate
- * @param    pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
- * @param    pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
- * @return   M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
-                      M4OSA_FileWriterPointer* pFileWritePtrFct);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn, M4OSA_Void* pFileOut,
-                          M4OSA_UInt32 uiMaxMetadataSize);
- * @brief   Set the MCS input and output files.
- * @note    It opens the input file, but the output file is not created yet.
- *          In case of still picture, four InputFileType are possible
- *          (M4VIDEOEDITING_kFileType_JPG/BMP/GIF/PNG
- *          If one of them is set, the OutputFileType SHALL be set to M4VIDEOEDITING_kFileType_JPG
- * @param   pContext            (IN) MCS context
- * @param   pFileIn             (IN) Input file to transcode (The type of this parameter
- *                                    (URL, pipe...) depends on the OSAL implementation).
- * @param   mediaType           (IN) Container type (.3gp,.amr, ...) of input file.
- * @param   pFileOut            (IN) Output file to create  (The type of this parameter
- *                                    (URL, pipe...) depends on the OSAL implementation).
- * @param   pTempFile           (IN) Temporary file for the constant memory writer to store
- *                                    metadata ("moov.bin").
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4ERR_ALLOC:        There is no more available memory
- * @return  M4ERR_FILE_NOT_FOUND:   The input file has not been found
- * @return  M4MCS_ERR_INVALID_INPUT_FILE:   The input file is not a valid file, or is corrupted
- * @return  M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM:  The input file contains no
- *                                                               supported audio or video stream
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn,
-                     M4VIDEOEDITING_FileType InputFileType,
-                     M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
- * @brief   Perform one step of trancoding.
- * @note
- * @param   pContext            (IN) MCS context
- * @param   pProgress           (OUT) Progress percentage (0 to 100) of the transcoding
- * @note    pProgress must be a valid address.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    One of the parameters is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_WAR_TRANSCODING_DONE: Transcoding is over, user should now call M4MCS_close()
- * @return  M4MCS_ERR_AUDIO_CONVERSION_FAILED: The audio conversion (AAC to AMR-NB, MP3) failed
- * @return  M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY: The input file contains an AAC audio track
- *                                                     with an invalid sampling frequency
- *                                                     (should never happen)
- * @return  M4MCS_WAR_PICTURE_AUTO_RESIZE: Picture will be automatically resized to fit
- *                                          into requirements
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
- * @brief   Pause the transcoding i.e. release the (external hardware) video decoder.
- * @note    This function is not needed if no hardware accelerators are used.
- *          In that case, pausing the MCS is simply achieved by temporarily suspending
- *          the M4MCS_step function calls.
- * @param   pContext            (IN) MCS context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
- * @brief   Resume the transcoding after a pause (see M4MCS_pause).
- * @note    This function is not needed if no hardware accelerators are used.
- *          In that case, resuming the MCS is simply achieved by calling
- *          the M4MCS_step function.
- * @param   pContext            (IN) MCS context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
- * @brief    Finish the MCS transcoding.
- * @note The output 3GPP file is ready to be played after this call
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
- * @brief    Free all resources used by the MCS.
- * @note The context is no more valid after this call
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
- * @brief    Finish the MCS transcoding and free all resources used by the MCS
- *          whatever the state is.
- * @note    The context is no more valid after this call
- * @param    pContext            (IN) MCS context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
- *                                          M4VIDEOEDITING_ClipProperties* pFileProperties);
- * @brief   Retrieves the properties of the audio and video streams from the input file.
- * @param   pContext            (IN) MCS context
- * @param   pProperties         (OUT) Pointer on an allocated M4VIDEOEDITING_ClipProperties
-                                structure which is filled with the input stream properties.
- * @note    The structure pProperties must be allocated and further de-allocated
-            by the application. The function must be called in the opened state.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
-                                        M4VIDEOEDITING_ClipProperties *pFileProperties);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
- * @brief   Set the MCS video output parameters.
- * @note    Must be called after M4MCS_open. Must be called before M4MCS_step.
- * @param   pContext            (IN) MCS context
- * @param   pParams             (IN/OUT) Transcoding parameters
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 : Output video frame size parameter is
- *                                                          incompatible with H263 encoding
- * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 : Output video frame size parameter is
- *                                                          incompatible with H263 encoding
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT     : Undefined output video format parameter
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE : Undefined output video frame size
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE : Undefined output video frame rate
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT : Undefined output audio format parameter
- * @return  M4MCS_ERR_DURATION_IS_NULL : Specified output parameters define a null duration stream
- *                                        (no audio and video)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
- * @brief   Set the values of the encoding parameters
- * @note    Must be called before M4MCS_checkParamsAndStart().
- * @param   pContext           (IN) MCS context
- * @param   pRates             (IN) Transcoding parameters
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac,
- *                                           12.2 for amr, 8 for mp3)
- * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equals
- * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than
- *                                                     the input clip duration
- * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
- * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output
- *                                            file at given bitrates
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW: Video bitrate too low
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
- * @brief   Get the extended values of the encoding parameters
- * @note    Could be called after M4MCS_setEncodingParams.
- * @param   pContext           (IN) MCS context
- * @param   pRates             (OUT) Transcoding parameters
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Encoding settings would produce a
- *                                              null duration clip = encoding is impossible
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext)
- * @brief
- * @note
- * @param   pContext           (IN) MCS context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac,
- *                                           12.2 for amr, 8 for mp3)
- * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equals
- * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than
- *                                                    the input clip duration
- * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
- * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output
- *                                            file at given bitrates
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW:  Video bitrate too low
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __M4MCS_API_H__ */
-
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h b/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h
deleted file mode 100755
index c042dbb..0000000
--- a/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- *************************************************************************
- * @file   M4MCS_API.h
- * @brief  MCS error codes definitions (Media Compressor Service)
- * @note
- *************************************************************************
- **/
-
-#ifndef __M4MCS_ErrorCodes_H__
-#define __M4MCS_ErrorCodes_H__
-
-/**
- *    OSAL basic types and errors */
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-
-/**
- *    OSAL core ID definitions */
-#include "M4OSA_CoreID.h"
-
-
-/************************************************************************/
-/* Warning codes                                                        */
-/************************************************************************/
-
-/* End of processing, user should now call M4MCS_close() */
-#define M4MCS_WAR_TRANSCODING_DONE            M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x1)
-/* Mediatype is not supported by the MCS */
-#define M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED    M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x2)
-/* Indicate that picture will be automatically resized to fit into the required
-   parameters (file size) */
-#define M4MCS_WAR_PICTURE_AUTO_RESIZE        M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x3)
-
-/************************************************************************/
-/* Error codes                                                          */
-/************************************************************************/
-
-
-/* ----- OPEN ERRORS ----- */
-
-/* The input file contains no supported stream (may be a corrupted file) */
-#define M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM   M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x01)
-/* The input file is invalid/corrupted */
-#define M4MCS_ERR_INVALID_INPUT_FILE                        M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x02)
-/* The input video frame size parameter is undefined */
-#define M4MCS_ERR_INVALID_INPUT_VIDEO_FRAME_SIZE            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x03)
-/* The input video frame size is non multiple of 16 */
-#define M4MCS_ERR_INPUT_VIDEO_SIZE_NON_X16                  M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x04)
-
-
-/* ----- SET OUTPUT PARAMS ERRORS ----- */
-
-/* The output video format parameter is undefined */
-#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT             M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x10)
-/* The output video frame size parameter is undefined */
-#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x11)
-/* The output video frame rate parameter is undefined */
-#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x12)
-/* The output audio format parameter is undefined */
-#define M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT             M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x13)
-/* The output video frame size parameter is incompatible with H263 encoding */
-#define M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x14)
-/* The output video frame rate parameter is incompatible with H263 encoding
-   (It can't happen in current version of MCS!) */
-#define M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x15)
-/* A null clip duration as been computed, which is unvalid (should never happen!) */
-#define M4MCS_ERR_DURATION_IS_NULL                          M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x16)
-/* The .mp4 container cannot handle h263 codec */
-#define M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE                M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x17)
-
-
-/* ----- PREPARE DECODERS ERRORS ----- */
-
-/* H263 Profile (other than 0) is not supported */
-#define M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED                M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x20)
-/* The input file contains an AAC audio track with an invalid sampling frequency
-   (should never happen) */
-#define M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x21)
-/* The audio conversion (AAC to AMR-NB, or MP3) failed */
-#define M4MCS_ERR_AUDIO_CONVERSION_FAILED                   M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x22)
-
-
-/* ----- SET ENCODING PARAMS ERRORS ----- */
-
-/* Begin cut time is larger than the input clip duration */
-#define M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x30)
-/* Begin cut and End cut are equals */
-#define M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT                  M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x31)
-/* End cut time is smaller than begin cut time */
-#define M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x32)
-/* Not enough space to store whole output file at given bitrates */
-#define M4MCS_ERR_MAXFILESIZE_TOO_SMALL                     M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x33)
-/* Video bitrate is too low (avoid ugly video) */
-#define M4MCS_ERR_VIDEOBITRATE_TOO_LOW                      M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x34)
-/* Audio bitrate is too low (16 kbps min for aac, 12.2 for amr, 8 for mp3) */
-#define M4MCS_ERR_AUDIOBITRATE_TOO_LOW                      M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x35)
-/* Video bitrate too high (we limit to 800 kbps) */
-#define M4MCS_ERR_VIDEOBITRATE_TOO_HIGH                     M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x36)
-/* Audio bitrate too high (we limit to 96 kbps) */
-#define M4MCS_ERR_AUDIOBITRATE_TOO_HIGH                     M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x37)
-
-/* ----- OTHERS ERRORS ----- */
-#define M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL                M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x50)
-#define M4MCS_ERR_NOMORE_SPACE                              M4OSA_ERR_CREATE(M4_ERR, M4MCS, 0x51)
-#define M4MCS_ERR_FILE_DRM_PROTECTED                        M4OSA_ERR_CREATE(M4_ERR, M4MCS, 0x52)
-#endif /* __M4MCS_ErrorCodes_H__ */
-
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h
deleted file mode 100755
index efaf1e6..0000000
--- a/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- *************************************************************************
- * @file   M4MCS_API.h
- * @brief  MCS internal constant values settings
- * @note   This header file is not public
- *************************************************************************
- **/
-
-#ifndef __M4MCS_INTERNALCONFIG_H__
-#define __M4MCS_INTERNALCONFIG_H__
-
-
-/**
- * Definition of max AU size */
-#define M4MCS_AUDIO_MAX_CHUNK_SIZE        7168 /**< add mp3 encoder and writer,
-                                                    max bitrate is now 320kbps instead of 128kbps
-                                                    so this value has to be increased accordingly
-                                                    = ((sizeof(M4OSA_UInt8)*max_channel_number)+3
-                                                    to take a margin(after tests, 2 was not enough
-                                                    ))*MAX_PCM_GRANULARITY_SAMPLES*/
-                                                    /**< Before: 4000*//**< Magical */
-
-/**
- * Video max AU and fragment size */
-#define M4MCS_VIDEO_MIN_COMPRESSION_RATIO   0.8 /**< Magical. Used to define the max AU size */
-#define M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO     1.2 /**< Magical. Used to define the max chunk size */
-
-/**
- * Various Magicals */
-#define M4MCS_WRITER_AUDIO_STREAM_ID        1
-#define M4MCS_WRITER_VIDEO_STREAM_ID        2
-
-/**
- * Granularity for audio encoder */
- /**< minimum number of samples to pass in AMR encoding case */
-#define M4MCS_PCM_AMR_GRANULARITY_SAMPLES 160
-/**< minimum number of samples to pass in AAC encoding case */
-#define M4MCS_PCM_AAC_GRANULARITY_SAMPLES 1024
-/**< minimum number of samples to pass in MP3 encoding case */
-#define M4MCS_PCM_MP3_GRANULARITY_SAMPLES 576
-
-#define M4MCS_AUDIO_MAX_AU_SIZE           1024  /**< add mp3 encoder and writer
-                                                This value is not used anymore, now the max AU
-                                                size is computed dynamically according to the
-                                                number of channels,the max PCM granularity sample
-                                                and a margin.*/
-                                                /**< Before: 1024*//**< Magical */
-/**
- * Writer file and moov size estimation */
-#define M4MCS_MOOV_OVER_FILESIZE_RATIO    1.04  /**< magical moov size is less than 4%
-                                                     of file size in average */
-
-/**
- * If 3gp file does not contain an STSS table (no rap frames),
-   jump backward to a specified limit */
-#define M4MCS_NO_STSS_JUMP_POINT          40000 /**< 40 s */
-
-#endif /* __M4MCS_INTERNALCONFIG_H__ */
-
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h
deleted file mode 100755
index 21c679e..0000000
--- a/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *************************************************************************
- * @file    M4MCS_InternalFunctions.h
- * @brief   This file contains all functions declarations internal
- *          to the MCS.
- *************************************************************************
- */
-
-#ifndef __M4MCS_INTERNALFUNCTIONS_H__
-#define __M4MCS_INTERNALFUNCTIONS_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "M4VPP_API.h"
-#include "M4ENCODER_common.h"
-
-/**
- **************************************************************************
- * M4OSA_ERR M4MCS_intApplyVPP( M4VPP_Context pContext,
- *                              M4VIFI_ImagePlane* pPlaneIn,
- *                              M4VIFI_ImagePlane* pPlaneOut)
- * @brief   Do the video rendering and the resize (if needed)
- * @note    It is called by the video encoder
- * @param   pContext    (IN)     VPP context, which actually is the MCS
- *                               internal context in our case
- * @param   pPlaneIn    (IN)     Contains the image
- * @param   pPlaneOut   (IN/OUT) Pointer to an array of 3 planes that will
- *                               contain the output YUV420 image
- * @return  M4NO_ERROR:                 No error
- * @return  ERR_MCS_VIDEO_DECODE_ERROR: the video decoding failed
- * @return  ERR_MCS_RESIZE_ERROR:       the resizing failed
- * @return  Any error returned by an underlaying module
- **************************************************************************
- */
-M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
-                            M4VIFI_ImagePlane* pPlaneOut);
-
-/**
- **************************************************************************
- * M4OSA_ERR M4MCS_SubscribeMediaAndCodec(M4MCS_Context pContext);
- * @brief    This function registers the reader, decoders, writers and encoders
- *           in the MCS.
- * @note
- * @param    pContext:    (IN) Execution context.
- * @return   M4NO_ERROR:        there is no error
- * @return   M4ERR_PARAMETER    pContext is NULL
- **************************************************************************
- */
-M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
-
-/**
- **************************************************************************
- * @brief    Clear encoders, decoders, reader and writers interfaces tables
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    The context is null
- **************************************************************************
- */
-M4OSA_ERR   M4MCS_clearInterfaceTables(M4MCS_Context pContext);
-
-/**
- **************************************************************************
- * M4OSA_ERR   M4MCS_registerWriter(M4MCS_Context pContext,
- *                                  M4VIDEOEDITING_FileType MediaType,
- *                                  M4WRITER_GlobalInterface *pWtrGlobalInterface,
- *                                  M4WRITER_DataInterface *pWtrDataInterface)
- * @brief   This function will register a specific file format writer.
- * @note    According to the Mediatype, this function will store in the internal
- *          context the writer context.
- * @param   pContext:    (IN) Execution context.
- * @return  M4NO_ERROR:         there is no error
- * @return  M4ERR_PARAMETER     pContext,pWtrGlobalInterface or pWtrDataInterface
- *                              is M4OSA_NULL (debug only), or invalid MediaType
- **************************************************************************
- */
-M4OSA_ERR   M4MCS_registerWriter(
-                        M4MCS_Context pContext,
-                        M4WRITER_OutputFileType MediaType,
-                        M4WRITER_GlobalInterface* pWtrGlobalInterface,
-                        M4WRITER_DataInterface* pWtrDataInterface);
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4MCS_registerEncoder(   M4MCS_Context pContext,
- *                                      M4VIDEOEDITING_VideoFormat mediaType,
- *                                      M4ENCODER_GlobalInterface *pEncGlobalInterface)
- * @brief   This function will register a specific video encoder.
- * @note    According to the Mediatype, this function will store in the internal
- *          context the encoder context.
- * @param   pContext:    (IN) Execution context.
- * @return  M4NO_ERROR:         there is no error
- * @return  M4ERR_PARAMETER     pContext or pEncGlobalInterface is
- *                              M4OSA_NULL (debug only), or invalid MediaType
- ******************************************************************************
- */
-M4OSA_ERR   M4MCS_registerVideoEncoder(
-                        M4MCS_Context pContext,
-                        M4ENCODER_Format MediaType,
-                        M4ENCODER_GlobalInterface *pEncGlobalInterface);
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4MCS_registerAudioEncoder(  M4MCS_Context pContext,
- *                                          M4ENCODER_AudioFormat mediaType,
- *                                          M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
- * @brief   This function will register a specific audio encoder.
- * @note    According to the Mediatype, this function will store in the internal
- *          context the encoder context.
- * @param   pContext:               (IN)   Execution context.
- * @param   mediaType:              (IN)   The media type.
- * @param   pEncGlobalInterface:    (OUT)  The encoder interface functions.
- * @return  M4NO_ERROR:       there is no error
- * @return  M4ERR_PARAMETER:  pContext or pEncGlobalInterface is
- *                              M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR   M4MCS_registerAudioEncoder(
-                        M4MCS_Context pContext,
-                        M4ENCODER_AudioFormat MediaType,
-                        M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
-
-/**
- **************************************************************************
- * @brief    Register reader.
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- **************************************************************************
- */
-M4OSA_ERR   M4MCS_registerReader(   M4MCS_Context pContext,
-                                    M4READER_MediaType mediaType,
-                                    M4READER_GlobalInterface *pRdrGlobalInterface,
-                                    M4READER_DataInterface *pRdrDataInterface);
-
-/**
- **************************************************************************
- * @brief   Register video decoder
- * @param   pContext             (IN/OUT) MCS context.
- * @param   decoderType          (IN) Decoder type
- * @param   pDecoderInterface    (IN) Decoder interface.
- * @return  M4NO_ERROR:            No error
- * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only),or the
- *                              decoder type is invalid
- **************************************************************************
- */
-M4OSA_ERR   M4MCS_registerVideoDecoder( M4MCS_Context pContext,
-                                        M4DECODER_VideoType decoderType,
-                                        M4DECODER_VideoInterface *pDecoderInterface);
-
-/**
- ************************************************************************
- * @brief   Register audio decoder
- * @note    This function is used internaly by the MCS to register Core audio decoders,
- * @param   context            (IN/OUT) MCS context.
- * @param   decoderType        (IN)     Audio decoder type
- * @param   pDecoderInterface  (IN)     Audio decoder interface.
- * @return  M4NO_ERROR:        No error
- * @return  M4ERR_PARAMETER:   A parameter is null, or the decoder type is invalid(in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_registerAudioDecoder(M4MCS_Context pContext, M4AD_Type decoderType,
-                                        M4AD_Interface *pDecoderInterface);
-
-/**
- ************************************************************************
- * @brief   Unregister writer
- * @param   pContext            (IN/OUT) MCS context.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllWriters(M4MCS_Context pContext);
-
-/**
- ************************************************************************
- * @brief   Unregister the encoders
- * @param   pContext            (IN/OUT) MCS context.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllEncoders(M4MCS_Context pContext);
-
-/**
- ************************************************************************
- * @brief   Unregister reader
- * @param   pContext            (IN/OUT) MCS context.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllReaders(M4MCS_Context pContext);
-
-/**
- ************************************************************************
- * @brief   Unregister the decoders
- * @param   pContext            (IN/OUT) MCS context.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllDecoders(M4MCS_Context pContext);
-
-/**
- ************************************************************************
- * @brief   Set current writer
- * @param   pContext            (IN/OUT) MCS context.
- * @param   mediaType           (IN) Media type.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return  M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:  Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentWriter( M4MCS_Context pContext,
-                                    M4VIDEOEDITING_FileType mediaType);
-
-/**
- ************************************************************************
- * @brief    Set a video encoder
- * @param    pContext            (IN/OUT) MCS context.
- * @param    MediaType           (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentVideoEncoder(   M4MCS_Context pContext,
-                                            M4VIDEOEDITING_VideoFormat mediaType);
-
-/**
- ************************************************************************
- * @brief    Set an audio encoder
- * @param    context            (IN/OUT) MCS context.
- * @param    MediaType        (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentAudioEncoder(   M4MCS_Context pContext,
-                                            M4VIDEOEDITING_AudioFormat mediaType);
-
-/**
- ************************************************************************
- * @brief    Set current reader
- * @param    pContext            (IN/OUT) MCS context.
- * @param    mediaType           (IN) Media type.
- * @return    M4NO_ERROR:        No error
- * @return    M4ERR_PARAMETER:   A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentReader( M4MCS_Context pContext,
-                                    M4VIDEOEDITING_FileType mediaType);
-
-/**
- ************************************************************************
- * @brief    Set a video decoder
- * @param    pContext           (IN/OUT) MCS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:       A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentVideoDecoder(   M4MCS_Context pContext,
-                                            M4_StreamType mediaType);
-
-/**
- ************************************************************************
- * @brief    Set an audio decoder
- * @param    context            (IN/OUT) MCS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:         No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentAudioDecoder(M4MCS_Context pContext, M4_StreamType mediaType);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pContext)
- * @brief    Check if an effect has to be applied currently
- * @note     It is called by the stepEncoding function
- * @param    pContext    (IN)   MCS internal context
- * @return   M4NO_ERROR:        No error
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
- * @brief    Apply audio effect FadeIn to pPCMdata
- * @param    pC           (IN/OUT) Internal edit context
- * @param    pPCMdata     (IN/OUT) Input and Output PCM audio data
- * @param    uiPCMsize    (IN)     Size of pPCMdata
- * @param    pProgress    (IN)     Effect progress
- * @return   M4NO_ERROR:           No error
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn(  M4OSA_Void *pFunctionContext,
-                                            M4OSA_Int16 *pPCMdata,
-                                            M4OSA_UInt32 uiPCMsize,
-                                            M4MCS_ExternalProgress *pProgress);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
- * @brief    Apply audio effect FadeIn to pPCMdata
- * @param    pC           (IN/OUT) Internal edit context
- * @param    pPCMdata     (IN/OUT) Input and Output PCM audio data
- * @param    uiPCMsize    (IN)     Size of pPCMdata
- * @param    pProgress    (IN)     Effect progress
- * @return   M4NO_ERROR:           No error
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut( M4OSA_Void *pFunctionContext,
-                                            M4OSA_Int16 *pPCMdata,
-                                            M4OSA_UInt32 uiPCMsize,
-                                            M4MCS_ExternalProgress *pProgress);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __M4MCS_INTERNALFUNCTIONS_H__ */
-
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h
deleted file mode 100755
index 5e4f236..0000000
--- a/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *************************************************************************
- * @file   M4MCS_API.h
- * @brief  MCS internal types and structures definitions
- * @note   This header file is not public
- *************************************************************************
- **/
-
-#ifndef __M4MCS_INTERNALTYPES_H__
-#define __M4MCS_INTERNALTYPES_H__
-
-/**
- *    MCS public API and types */
-#include "M4MCS_API.h"
-#include "M4MCS_ErrorCodes.h"
-
-#include "NXPSW_CompilerSwitches.h"
-
-/** Determine absolute value of a. */
-#define M4MCS_ABS(a)               ( ( (a) < (0) ) ? (-(a)) : (a) )
-
-
-#define Y_PLANE_BORDER_VALUE    0x00
-#define U_PLANE_BORDER_VALUE    0x80
-#define V_PLANE_BORDER_VALUE    0x80
-
-
-/**
- *    Internally used modules */
-#include "M4READER_3gpCom.h"        /**< Read 3GPP file     */
-#include "M4DECODER_Common.h"       /**< Decode video       */
-#include "M4VIFI_FiltersAPI.h"      /**< Video resize       */
-#include "M4AD_Common.h"            /**< Decoder audio      */
-#include "SSRC.h"                   /**< SSRC               */
-#include "From2iToMono_16.h"        /**< Stereo to Mono     */
-#include "MonoTo2I_16.h"            /**< Mono to Stereo     */
-#include "M4ENCODER_AudioCommon.h"  /**< Encode audio       */
-#include "M4WRITER_common.h"        /**< Writer common interface */
-#include "M4ENCODER_common.h"
-
-/**
- *  Instead of including AAC core properties, it is better to redefine the needed type
- *  AAC_DEC_STREAM_PROPS
- *  In case of external AAC decoder, it will be necessary to put this type as public
- */
-
-/**
- ******************************************************************************
- * struct AAC_DEC_STREAM_PROPS
- * @brief AAC Stream properties
- * @Note aNoChan and aSampFreq are used for parsing even the user parameters
- *        are different.  User parameters will be input for the output behaviour
- *        of the decoder whereas for parsing bitstream properties are used.
- ******************************************************************************
- */
-typedef struct {
-  M4OSA_Int32 aAudioObjectType;     /**< Audio object type of the stream - in fact
-                                         the type found in the Access Unit parsed */
-  M4OSA_Int32 aNumChan;             /**< number of channels (=1(mono) or =2(stereo))
-                                         as indicated by input bitstream*/
-  M4OSA_Int32 aSampFreq;            /**< sampling frequency in Hz */
-  M4OSA_Int32 aExtensionSampFreq;   /**< extended sampling frequency in Hz, = 0 is
-                                         no extended frequency */
-  M4OSA_Int32 aSBRPresent;          /**< presence=1/absence=0 of SBR */
-  M4OSA_Int32 aPSPresent;           /**< presence=1/absence=0 of PS */
-  M4OSA_Int32 aMaxPCMSamplesPerCh;  /**< max number of PCM samples per channel */
-} AAC_DEC_STREAM_PROPS;
-
-/**
- ******************************************************************************
- * @brief        Codecs registration same as in VPS and VES, so less mapping
- *              is required toward MCS api types
- ******************************************************************************
- */
-typedef struct
-{
-    M4WRITER_GlobalInterface* pGlobalFcts;    /**< open, close, setoption,etc... functions */
-    M4WRITER_DataInterface*    pDataFcts;        /**< data manipulation functions */
-} M4MCS_WriterInterface;
-
-/**
- ******************************************************************************
- * enum            M4MCS_States
- * @brief        Main state machine of the MCS.
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kState_CREATED,           /**< M4MCS_init has been called                */
-    M4MCS_kState_OPENED,            /**< M4MCS_open has been called                */
-    M4MCS_kState_SET,               /**< All mandatory parameters have been set    */
-    M4MCS_kState_READY,             /**< All optionnal parameters have been set    */
-    M4MCS_kState_BEGINVIDEOJUMP,    /**< Must jump to the Iframe before the begin cut */
-    M4MCS_kState_BEGINVIDEODECODE,  /**< Must decode up to the begin cut        */
-    M4MCS_kState_PROCESSING,        /**< Step can be called                        */
-    M4MCS_kState_PAUSED,            /**< Paused, Resume can be called            */
-    M4MCS_kState_FINISHED,          /**< Transcoding is finished                */
-    M4MCS_kState_CLOSED             /**< Output file has been created            */
-} M4MCS_States;
-
-/**
- ******************************************************************************
- * enum            M4MCS_StreamState
- * @brief        State of a media stream encoding (audio or video).
- ******************************************************************************
- */
-typedef enum
-{
-    M4MCS_kStreamState_NOSTREAM  = 0,    /**< No stream present                    */
-    M4MCS_kStreamState_STARTED   = 1,    /**< The stream encoding is in progress */
-    M4MCS_kStreamState_FINISHED  = 2    /**< The stream has finished encoding    */
-} M4MCS_StreamState;
-
-
-/**
- ******************************************************************************
- * enum            anonymous enum
- * @brief        enum to keep track of the encoder state
- ******************************************************************************
- */
-enum
-{
-    M4MCS_kNoEncoder,
-    M4MCS_kEncoderClosed,
-    M4MCS_kEncoderStopped,
-    M4MCS_kEncoderRunning
-};
-
-/**
- ******************************************************************************
- * structure    M4MCS_InternalContext
- * @brief        This structure defines the MCS context (private)
- * @note        This structure is used for all MCS calls to store the context
- ******************************************************************************
- */
-typedef struct
-{
-    M4OSA_UInt32    bitPos;
-                 /* bit count of number of bits used so far */
-
-    M4OSA_UInt8   *streamBuffer;
-                /* Bitstream Buffer */
-
-    M4OSA_UInt32    byteCnt;
-                /* Number of Bytes written in Bitstream buffer*/
-
-    M4OSA_UInt32    currBuff;
-                /* Current buffer holds, 4bytes of bitstream*/
-
-    M4OSA_UInt8   prevByte;
-                /* Previous byte written in the buffer */
-
-    M4OSA_UInt8   prevPrevByte;
-                /* Previous to previous byte written in the buffer */
-
-}NSWAVC_bitStream_t_MCS;
-
-#define _MAXnum_slice_groups  8
-#define _MAXnum_ref_frames_in_pic_order_cnt_cycle  256
-
-typedef struct
-{
-  M4OSA_UInt32  level_idc_index;
-  M4OSA_UInt32  MaxFrameNum;
-  M4OSA_UInt32  expectedDeltaPerPicOrderCntCycle;
-  M4OSA_Int32   MaxPicOrderCntLsb;
-  M4OSA_Int32   max_dec_frame_buffering;
-
-  /* (pic_order_cnt_type == 1) */
-  M4OSA_Int32   offset_for_non_ref_pic;
-  M4OSA_Int32   offset_for_top_to_bottom_field;
-  M4OSA_Int32   frame_crop_left_offset;
-  M4OSA_Int32   frame_crop_right_offset;
-  M4OSA_Int32   frame_crop_top_offset;
-  M4OSA_Int32   frame_crop_bottom_offset;
-  M4OSA_Int32   offset_for_ref_frame[_MAXnum_ref_frames_in_pic_order_cnt_cycle];
-
-  M4OSA_UInt16 PicWidthInMbs;
-  M4OSA_UInt16 FrameHeightInMbs;
-  M4OSA_UInt16  pic_width_in_mbs_minus1;
-  M4OSA_UInt16  pic_height_in_map_units_minus1;
-
-#ifdef _CAP_FMO_
-  M4OSA_UInt16 NumSliceGroupMapUnits;
-  M4OSA_UInt16 MaxPicSizeInMbs;
-#endif /*_CAP_FMO_*/
-
-  M4OSA_UInt8   profile_idc;
-  M4OSA_UInt8   reserved_zero_4bits;
-  M4OSA_UInt8   level_idc;
-  M4OSA_UInt8   seq_parameter_set_id;
-  M4OSA_UInt8   log2_max_frame_num_minus4;
-  M4OSA_UInt8   pic_order_cnt_type;
-  /* if(pic_order_cnt_type == 0) */
-  M4OSA_UInt8   log2_max_pic_order_cnt_lsb_minus4;
-
-  M4OSA_UInt8   num_ref_frames_in_pic_order_cnt_cycle;
-  /* for( i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ ) */
-  M4OSA_UInt8   num_ref_frames;
-
-  M4OSA_Bool    constraint_set0_flag;
-  M4OSA_Bool    constraint_set1_flag;
-  M4OSA_Bool    constraint_set2_flag;
-  M4OSA_Bool    constraint_set3_flag;
-  M4OSA_Bool    delta_pic_order_always_zero_flag;
-  M4OSA_Bool    gaps_in_frame_num_value_allowed_flag;
-  M4OSA_Bool    frame_mbs_only_flag;
-  M4OSA_Bool    mb_adaptive_frame_field_flag;
-  M4OSA_Bool    direct_8x8_inference_flag;
-  M4OSA_Bool    frame_cropping_flag;
-  M4OSA_Bool    vui_parameters_present_flag;
-  M4OSA_Bool    Active;
-
-  /* vui_seq_parameters_t vui_seq_parameters; */
-} ComSequenceParameterSet_t_MCS;
-
-typedef struct
-{
-  M4OSA_Int16       pic_init_qp_minus26;
-  M4OSA_Int16       pic_init_qs_minus26;
-  M4OSA_Int16       chroma_qp_index_offset;
-
-//#ifdef _CAP_FMO_
-  /* if( slice_group_map_type = = 0 ) */
-  M4OSA_UInt16      run_length_minus1[_MAXnum_slice_groups];
-  /* else if( slice_group_map_type = = 2 ) */
-  M4OSA_UInt16      top_left[_MAXnum_slice_groups];
-  M4OSA_UInt16      bottom_right[_MAXnum_slice_groups];
-  /* else if( slice_group_map_type = = 6 ) */
-  M4OSA_UInt16      pic_size_in_map_units_minus1;
-  M4OSA_UInt16      slice_group_change_rate_minus1;
-
-  M4OSA_UInt16 FirstMbInSliceGroup[_MAXnum_slice_groups];
-  M4OSA_UInt16 LastMbInSliceGroup[_MAXnum_slice_groups];
-
-
-  M4OSA_UInt8  *slice_group_id;
-  M4OSA_UInt8  *MapUnitToSliceGroupMap;
-  M4OSA_UInt8  *MbToSliceGroupMap;
-  M4OSA_UInt16  NumSliceGroupMapUnits;
-
-  M4OSA_UInt8       slice_group_map_type;
-  /* else if( slice_group_map_type = = 3 || 4 || 5 */
-  M4OSA_Bool        slice_group_change_direction_flag;
-  M4OSA_Bool   map_initialized;
-// #endif /*_CAP_FMO_*/
-
-  M4OSA_UInt8       pic_parameter_set_id;
-  M4OSA_UInt8       seq_parameter_set_id;
-  M4OSA_UInt8      num_ref_idx_l0_active_minus1;
-  M4OSA_UInt8      num_ref_idx_l1_active_minus1;
-  M4OSA_UInt8       weighted_bipred_idc;
-  M4OSA_UInt8       num_slice_groups_minus1;
-
-  M4OSA_Bool        entropy_coding_mode_flag;
-  /* if( pic_order_cnt_type < 2 )  in the sequence parameter set */
-  M4OSA_Bool        pic_order_present_flag;
-  M4OSA_Bool        weighted_pred_flag;
-  M4OSA_Bool        deblocking_filter_control_present_flag;
-  M4OSA_Bool        constrained_intra_pred_flag;
-  M4OSA_Bool        redundant_pic_cnt_present_flag;
-  M4OSA_Bool    Active;
-
-  ComSequenceParameterSet_t_MCS *p_active_sps;
-} ComPictureParameterSet_t_MCS;
-
-typedef struct
-{
-      M4OSA_UInt32 bitPos;                /*!< bit position in buffer */
-      M4OSA_UInt32 totalBits;             /*!< bit position in file (total bits read so far) */
-
-      M4OSA_UInt32 lastTotalBits;         /*!< bit position in file of the last VOP */
-      M4OSA_UInt32 numBitsInBuffer;       /*!< number of bits in buffer */
-      M4OSA_UInt32 readableBytesInBuffer; /*!< number of bytes that can be read in decoder buffer*/
-      M4OSA_UInt32 maxBufferSize;         /*!< max buffer size in bit units */
-      M4OSA_UInt8  *Buffer;               /*!< char buffer at reading from file */
-      M4OSA_Int32     i8BitCnt;
-      M4OSA_UInt32     ui32TempBuff;
-      M4OSA_Int8*pui8BfrPtr;
-      M4OSA_UInt32    ui32LastTwoBytes;  /*!< stores the last read two bytes */
-} ComBitStreamMCS_t;
-
-
-typedef struct
-{
-
-    M4OSA_Int32 prev_frame_num;
-    M4OSA_Int32 cur_frame_num;
-    M4OSA_Int32 prev_new_frame_num;
-    M4OSA_Int32 log2_max_frame_num_minus4;
-    M4OSA_Int32 is_done;
-    M4OSA_Int32 is_first;
-    M4OSA_Int32 frame_count;
-    M4OSA_Int32 frame_mod_count;
-    M4OSA_Int32 POC_lsb;
-    M4OSA_Int32 POC_lsb_mod;
-
-
-    M4OSA_UInt32    m_Num_Bytes_NALUnitLength;
-
-    M4OSA_UInt8*    m_pDecoderSpecificInfo;   /**< Pointer on specific information required
-                                                   to create a decoder */
-    M4OSA_UInt32    m_decoderSpecificInfoSize;/**< Size of the specific information pointer above*/
-
-    M4OSA_UInt8*    m_pEncoderSPS;
-    M4OSA_UInt32    m_encoderSPSSize;
-
-    M4OSA_UInt8*    m_pEncoderPPS;
-    M4OSA_UInt32    m_encoderPPSSize;
-
-    M4OSA_UInt8*    m_pFinalDSI;
-    M4OSA_UInt32    m_pFinalDSISize;
-
-    M4OSA_UInt32    m_encoder_SPS_Cnt;
-    ComSequenceParameterSet_t_MCS *p_clip_sps;
-    M4OSA_UInt32    m_encoder_PPS_Cnt;
-    ComPictureParameterSet_t_MCS  *p_clip_pps;
-
-    ComSequenceParameterSet_t_MCS *p_encoder_sps;
-    ComPictureParameterSet_t_MCS  *p_encoder_pps;
-
-
-    ComSequenceParameterSet_t_MCS  encoder_sps;
-    ComPictureParameterSet_t_MCS   encoder_pps;
-    ComSequenceParameterSet_t_MCS  clip_sps;
-
-    /* Encoder SPS parameters */
-    M4OSA_UInt32 enc_seq_parameter_set_id;
-    M4OSA_UInt32 enc_log2_max_frame_num_minus4;
-    M4OSA_UInt32 enc_pic_order_cnt_type;
-    M4OSA_UInt32 enc_log2_max_pic_order_cnt_lsb_minus4; /* applicable when POC type = 0 */
-    M4OSA_UInt32 enc_delta_pic_order_always_zero_flag;
-    M4OSA_Int32 enc_offset_for_non_ref_pic;
-    M4OSA_Int32 enc_offset_for_top_to_bottom_field;
-    M4OSA_UInt32 enc_num_ref_frames_in_pic_order_cnt_cycle; /* range 0 to 255 */
-    /* array of size num_ref_frames_in_pic_order_cnt_cycle */
-    M4OSA_Int32   enc_offset_for_ref_frame[256];
-    M4OSA_UInt32 enc_num_ref_frames;
-    M4OSA_UInt32 enc_gaps_in_frame_num_value_allowed_flag;
-
-
-    /* Input clip SPS parameters */
-    M4OSA_UInt32 clip_seq_parameter_set_id;
-    M4OSA_UInt32 clip_log2_max_frame_num_minus4;
-    M4OSA_UInt32 clip_pic_order_cnt_type;
-    M4OSA_UInt32 clip_log2_max_pic_order_cnt_lsb_minus4; /* applicable when POC type = 0 */
-    M4OSA_UInt32 clip_delta_pic_order_always_zero_flag;
-    M4OSA_Int32  clip_offset_for_non_ref_pic;
-    M4OSA_Int32  clip_offset_for_top_to_bottom_field;
-    M4OSA_UInt32 clip_num_ref_frames_in_pic_order_cnt_cycle; /* range 0 to 255 */
-    /* array of size num_ref_frames_in_pic_order_cnt_cycle */
-    M4OSA_Int32  clip_offset_for_ref_frame[256];
-    M4OSA_UInt32 clip_num_ref_frames;
-    M4OSA_UInt32 clip_gaps_in_frame_num_value_allowed_flag;
-
-    M4OSA_UInt32 final_PPS_ID;
-    M4OSA_UInt32 final_SPS_ID;
-    NSWAVC_bitStream_t_MCS  encbs;
-
-} NSWAVC_MCS_t;
-
-
-
-/**
- ******************************************************************************
- * structure    M4MCS_InternalContext
- * @brief       This structure defines the MCS context (private)
- * @note        This structure is used for all MCS calls to store the context
- ******************************************************************************
- */
-typedef struct
-{
-    /**
-     * MCS State and settings stuff */
-    M4MCS_States            State;     /**< MCS internal state */
-    M4MCS_StreamState       VideoState;/**< State of the video encoding */
-    M4MCS_StreamState       AudioState;/**< State of the audio encoding */
-    M4OSA_Bool              noaudio;/**< Flag to know if we have to deal with audio transcoding */
-    M4OSA_Bool              novideo;/**< Flag to know if we have to deal with video transcoding */
-
-    M4VIDEOEDITING_ClipProperties  InputFileProperties;/**< Input audio/video stream properties */
-    M4OSA_Void*             pInputFile;             /**< Remember input file pointer between fast
-                                                         open and normal open */
-    M4VIDEOEDITING_FileType InputFileType;          /**< Remember input file type between fast
-                                                         open and normal open */
-    M4OSA_Bool              bFileOpenedInFastMode;  /**< Flag to know if a particular reader
-                                                         supports fast open */
-    M4OSA_UInt32            uiMaxMetadataSize;      /**< Limitation on the max acceptable moov
-                                                         size of a 3gpp file */
-
-    M4ENCODER_Format        EncodingVideoFormat;    /**< Output video format, set by the user */
-    M4ENCODER_FrameWidth    EncodingWidth;          /**< Output video width, set by the user */
-    M4ENCODER_FrameHeight   EncodingHeight;         /**< Output video height, set by the user */
-    M4ENCODER_FrameRate     EncodingVideoFramerate; /**< Output video framerate, set by the user*/
-
-    M4OSA_UInt32            uiBeginCutTime;     /**< Begin cut time, in milliseconds */
-    M4OSA_UInt32            uiEndCutTime;       /**< Begin cut time, in milliseconds */
-    M4OSA_UInt32            uiMaxFileSize;      /**< Maximum output file size, in bytes */
-    M4OSA_UInt32            uiAudioBitrate;     /**< Targeted audio bitrate in bps */
-    M4OSA_UInt32            uiVideoBitrate;     /**< Targeted video bitrate in bps */
-
-    M4OSA_UInt8     uiProgress;  /**< Progress information saved at each step to be able to
-                                      return it in case of pause */
-
-    /**
-     * Reader stuff */
-    M4OSA_Context           pReaderContext;           /**< Context of the reader module */
-    M4_VideoStreamHandler*  pReaderVideoStream;       /**< Description of the read video stream */
-    M4_AudioStreamHandler*  pReaderAudioStream;       /**< Description of the read audio stream */
-    M4OSA_Bool              bUnsupportedVideoFound;   /**< True if an unsupported video stream
-                                                            type has been found */
-    M4OSA_Bool              bUnsupportedAudioFound;   /**< True if an unsupported audio stream
-                                                            type has been found */
-    M4_AccessUnit           ReaderVideoAU;            /**< Read video access unit */
-    M4_AccessUnit           ReaderVideoAU1;           /**< Read video access unit */
-    M4_AccessUnit           ReaderVideoAU2;           /**< Read video access unit */
-    M4_AccessUnit           ReaderAudioAU;            /**< Read audio access unit */
-    M4_AccessUnit           ReaderAudioAU1;           /**< Read audio access unit */
-    M4_AccessUnit           ReaderAudioAU2;           /**< Read audio access unit */
-    M4OSA_MemAddr8          m_pDataAddress1;          /**< Temporary buffer for Access Unit */
-    M4OSA_MemAddr8          m_pDataAddress2;          /**< Temporary buffer for Access Unit */
-    M4OSA_MemAddr8          m_pDataVideoAddress1;     /**< Temporary buffer for Access Unit */
-    M4OSA_MemAddr8          m_pDataVideoAddress2;     /**< Temporary buffer for Access Unit */
-    M4OSA_UInt32            m_audioAUDuration;        /**< Audio AU duration */
-    M4OSA_Int32             iAudioCtsOffset;          /**< Audio AU CTS offset due to begin cut */
-
-    /**
-     * Video decoder stuff */
-    M4OSA_Context         pViDecCtxt;         /**< Video decoder context */
-    M4OSA_Double          dViDecStartingCts;  /**< Video CTS at which the decode/encode will start
-                                                   (used for begin cut and pause/resume) */
-    M4OSA_Double          dViDecCurrentCts;   /**< Video CTS to decode */
-    M4OSA_Int32           iVideoBeginDecIncr; /**< CTS step for the begin cut decode (doesn't
-                                                    need floating point precision) */
-    M4OSA_Double          dCtsIncrement;      /**< Cts increment from one video frame to another*/
-    M4OSA_Bool            isRenderDup;        /**< To handle duplicate frame rendering in case of
-                                                    external decoding */
-    M4VIFI_ImagePlane*    lastDecodedPlane;   /**< Last decoded plane */
-
-    /**
-     * Video encoder stuff */
-    M4OSA_Context         pViEncCtxt;         /**< Video encoder context */
-    M4VIFI_ImagePlane*    pPreResizeFrame;    /**< The decoded image before resize
-                                                  (allocated if resize needed only)*/
-    M4OSA_UInt32          uiEncVideoBitrate;  /**< Actual video bitrate for the video encoder */
-    M4OSA_UInt32          outputVideoTimescale;
-    M4OSA_UInt32          encoderState;
-
-    /**
-     * Audio decoder stuff */
-    M4OSA_Context         pAudioDecCtxt;        /**< Audio (AAC) decoder context */
-    M4AD_Buffer           AudioDecBufferIn;     /**< Input structure for the audio decoder */
-    M4AD_Buffer           AudioDecBufferOut;    /**< Output structure for the audio decoder */
-    M4OSA_MemAddr8        pPosInDecBufferOut;   /**< Position into the decoder buffer */
-    AAC_DEC_STREAM_PROPS  AacProperties;   /**< Structure for new api to get AAC properties */
-
-    /**
-     * Sample Rate Convertor (SSRC) stuff */
-    SSRC_Instance_t        SsrcInstance;       /**< Context of the Ssrc */
-    SSRC_Scratch_t*        SsrcScratch;        /**< Working memory of the Ssrc */
-    short                  iSsrcNbSamplIn;     /**< Number of sample the Ssrc needs as input */
-    short                  iSsrcNbSamplOut;    /**< Number of sample the Ssrc outputs */
-    M4OSA_MemAddr8         pSsrcBufferIn;      /**< Input of the SSRC */
-    M4OSA_MemAddr8         pSsrcBufferOut;     /**< Output of the SSRC */
-    M4OSA_MemAddr8         pPosInSsrcBufferIn; /**< Position into the SSRC in buffer */
-    M4OSA_MemAddr8         pPosInSsrcBufferOut;/**< Position into the SSRC out buffer */
-
-    M4OSA_Context          pLVAudioResampler;
-
-
-    /**
-     * audio encoder stuff */
-    M4OSA_Context                   pAudioEncCtxt; /**< Context of the audio encoder */
-    M4ENCODER_AudioDecSpecificInfo  pAudioEncDSI; /**< Decoder specific info built by the encoder*/
-    M4ENCODER_AudioParams           AudioEncParams;/**< Config of the audio encoder */
-    M4OSA_MemAddr8            pAudioEncoderBuffer;      /**< Input of the encoder */
-    M4OSA_MemAddr8            pPosInAudioEncoderBuffer; /**< Position into the encoder buffer */
-    M4OSA_UInt32              audioEncoderGranularity;  /**< Minimum number of pcm samples needed
-                                                             to feed audio encoder */
-
-    /**
-     * Writer stuff */
-    M4OSA_Context             pWriterContext;     /**< Context of the writer module */
-    M4OSA_Void*               pOutputFile;        /**< Output file to be created */
-    M4OSA_Void*               pTemporaryFile;     /**< Temporary file to be created to store
-                                                        metadata ("moov.bin") */
-    M4SYS_StreamDescription   WriterVideoStream;  /**< Description of the written video stream */
-    M4SYS_StreamDescription   WriterAudioStream;  /**< Description of the written audio stream */
-    M4WRITER_StreamVideoInfos WriterVideoStreamInfo;/**< Video properties of the written video
-                                                          stream */
-    M4SYS_AccessUnit          WriterVideoAU;        /**< Written video access unit */
-    M4SYS_AccessUnit          WriterAudioAU;        /**< Written audio access unit */
-    M4OSA_UInt32              uiVideoAUCount;       /**< Number of video AU written in output
-                                                          file */
-    M4OSA_UInt32              uiVideoMaxAuSize;     /**< Max access unit size for the output
-                                                          video stream */
-    M4OSA_UInt32              uiVideoMaxChunckSize; /**< Max chunck size for the output video
-                                                          stream */
-    M4OSA_UInt32              uiAudioAUCount;   /**< Number of audio AU written in output file */
-    M4OSA_UInt32              uiAudioMaxAuSize; /**< Max access unit size for the output
-                                                       audio stream */
-    M4OSA_UInt32              uiAudioCts;       /**< Audio AU cts (when audio is transcoded) */
-    M4OSA_Bool                b_isRawWriter;    /**< Boolean to know if the raw writer is
-                                                      registered or not */
-    M4OSA_Context             pOutputPCMfile;   /**< Output PCM file if not NULL */
-
-    /**
-     * Filesystem functions */
-    M4OSA_FileReadPointer*    pOsaFileReadPtr; /**< OSAL file read functions,
-                                                    to be provided by user */
-    M4OSA_FileWriterPointer*  pOsaFileWritPtr; /**< OSAL file write functions,
-                                                    to be provided by user */
-
-    /**
-      * Media and Codec registration */
-    /**< Table of M4VES_WriterInterface structures for avalaible Writers list */
-    M4MCS_WriterInterface               WriterInterface[M4WRITER_kType_NB];
-    /**< open, close, setoption,etc... functions of the used writer*/
-    M4WRITER_GlobalInterface*           pWriterGlobalFcts;
-    /**< data manipulation functions of the used writer */
-    M4WRITER_DataInterface*             pWriterDataFcts;
-    /**< Table of M4ENCODER_GlobalInterface structures for avalaible encoders list */
-    M4ENCODER_GlobalInterface*          pVideoEncoderInterface[M4ENCODER_kVideo_NB];
-    /**< Functions of the used encoder */
-    M4ENCODER_GlobalInterface*          pVideoEncoderGlobalFcts;
-
-    M4OSA_Void*                         pVideoEncoderExternalAPITable[M4ENCODER_kVideo_NB];
-    M4OSA_Void*                         pCurrentVideoEncoderExternalAPI;
-    M4OSA_Void*                         pVideoEncoderUserDataTable[M4ENCODER_kVideo_NB];
-    M4OSA_Void*                         pCurrentVideoEncoderUserData;
-
-    /**< Table of M4ENCODER_AudioGlobalInterface structures for avalaible encoders list */
-    M4ENCODER_AudioGlobalInterface*     pAudioEncoderInterface[M4ENCODER_kAudio_NB];
-    /**< Table of internal/external flags for avalaible encoders list */
-    M4OSA_Bool                          pAudioEncoderFlag[M4ENCODER_kAudio_NB];
-    /**< Functions of the used encoder */
-    M4ENCODER_AudioGlobalInterface*     pAudioEncoderGlobalFcts;
-    M4OSA_Void*                         pAudioEncoderUserDataTable[M4ENCODER_kAudio_NB];
-    M4OSA_Void*                         pCurrentAudioEncoderUserData;
-
-    M4READER_GlobalInterface*           m_pReaderGlobalItTable[M4READER_kMediaType_NB];
-    M4READER_DataInterface*             m_pReaderDataItTable[M4READER_kMediaType_NB];
-    M4READER_GlobalInterface*           m_pReader;
-    M4READER_DataInterface*             m_pReaderDataIt;
-    M4OSA_UInt8                         m_uiNbRegisteredReaders;
-
-    M4DECODER_VideoInterface*           m_pVideoDecoder;
-    M4DECODER_VideoInterface*           m_pVideoDecoderItTable[M4DECODER_kVideoType_NB];
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-    M4OSA_Void*                         m_pCurrentVideoDecoderUserData;
-    M4OSA_Void*                         m_pVideoDecoderUserDataTable[M4DECODER_kVideoType_NB];
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-    M4OSA_UInt8                         m_uiNbRegisteredVideoDec;
-
-    M4AD_Interface*         m_pAudioDecoder;
-    M4AD_Interface*         m_pAudioDecoderItTable[M4AD_kType_NB];
-    M4OSA_Bool              m_pAudioDecoderFlagTable[M4AD_kType_NB]; /**< store indices of external
-                                                                      decoders */
-    M4OSA_Void*             m_pAudioDecoderUserDataTable[M4AD_kType_NB];
-    M4OSA_Void*             m_pCurrentAudioDecoderUserData;
-
-    M4MCS_MediaRendering    MediaRendering;     /**< FB: to crop, resize, or render black borders*/
-    M4OSA_Context           m_air_context;
-    M4OSA_Bool              bExtOMXAudDecoder;  /* External OMX Audio decoder */
-
-    /**< FlB 2009.03.04: Audio effects*/
-    M4MCS_EffectSettings    *pEffects;              /**< List of effects */
-    M4OSA_UInt8             nbEffects;              /**< Number of effects in the above list */
-    M4OSA_Int8              pActiveEffectNumber;    /**< Effect ID to be applied, if -1,
-                                                       no effect has to be applied currently*/
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-    M4OSA_Bool              m_bIsStillPicture;       /**< =TRUE if input file is a still picture
-                                                        (JPEG, PNG, BMP, GIF)*/
-    M4MCS_Context           m_pStillPictureContext; /**< Context of the still picture part of MCS*/
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-    NSWAVC_MCS_t            *m_pInstance;
-    M4OSA_UInt8             *H264MCSTempBuffer;
-    M4OSA_UInt32            H264MCSTempBufferSize;
-    M4OSA_UInt32            H264MCSTempBufferDataSize;
-    M4OSA_Bool              bH264Trim;
-    /* Flag when to get  lastdecodedframeCTS */
-    M4OSA_Bool              bLastDecodedFrameCTS;
-    M4OSA_Int32             encodingVideoProfile;
-    M4OSA_Int32             encodingVideoLevel;
-
-} M4MCS_InternalContext;
-
-
-#endif /* __M4MCS_INTERNALTYPES_H__ */
-
diff --git a/libvideoeditor/vss/mcs/src/Android.mk b/libvideoeditor/vss/mcs/src/Android.mk
deleted file mode 100755
index b470e6b..0000000
--- a/libvideoeditor/vss/mcs/src/Android.mk
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libvideoeditor_mcs
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE:= libvideoeditor_mcs
-
-LOCAL_SRC_FILES:=          \
-      M4MCS_API.c \
-      M4MCS_AudioEffects.c \
-      M4MCS_Codecs.c \
-      M4MCS_MediaAndCodecSubscription.c \
-      M4MCS_VideoPreProcessing.c
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils             \
-    libutils              \
-    libvideoeditor_osal   \
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
-    $(TOP)/frameworks/native/include/media/openmax
-
-LOCAL_SHARED_LIBRARIES += libdl
-
-# All of the shared libraries we link against.
-LOCAL_LDLIBS := \
-    -lpthread -ldl
-
-LOCAL_CFLAGS += -Wno-multichar \
-    -DM4MCS_WITH_FAST_OPEN
-
-include $(BUILD_STATIC_LIBRARY)
-
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_API.c b/libvideoeditor/vss/mcs/src/M4MCS_API.c
deleted file mode 100755
index c056ef0..0000000
--- a/libvideoeditor/vss/mcs/src/M4MCS_API.c
+++ /dev/null
@@ -1,10949 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- *************************************************************************
- * @file   M4MCS_API.c
- * @brief  MCS implementation (Video Compressor Service)
- * @note   This file implements the API and the processing of the MCS
- *************************************************************************
- **/
-
-/**
- ********************************************************************
- * Includes
- ********************************************************************
- */
-/**
- * OSAL headers */
-#include "M4OSA_Memory.h" /**< OSAL memory management */
-#include "M4OSA_Debug.h"  /**< OSAL debug management */
-
-/* PCM samples */
-#include "VideoEditorResampler.h"
-/**
- * Decoder interface */
-#include "M4DECODER_Common.h"
-
-/* Encoder interface*/
-#include "M4ENCODER_common.h"
-
-/* Enable for DEBUG logging */
-//#define MCS_DUMP_PCM_TO_FILE
-#ifdef MCS_DUMP_PCM_TO_FILE
-#include <stdio.h>
-FILE *file_au_reader = NULL;
-FILE *file_pcm_decoder = NULL;
-FILE *file_pcm_encoder = NULL;
-#endif
-
-/* Core headers */
-#include "M4MCS_API.h"
-#include "M4MCS_ErrorCodes.h"
-#include "M4MCS_InternalTypes.h"
-#include "M4MCS_InternalConfig.h"
-#include "M4MCS_InternalFunctions.h"
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-#include "M4MCS_StillPicture.h"
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-/* Common headers (for aac) */
-#include "M4_Common.h"
-
-#include "NXPSW_CompilerSwitches.h"
-
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-#include "M4VD_EXTERNAL_Interface.h"
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-#include "M4AIR_API.h"
-#include "OMX_Video.h"
-
-/* Version */
-#define M4MCS_VERSION_MAJOR 3
-#define M4MCS_VERSION_MINOR 4
-#define M4MCS_VERSION_REVISION  3
-
-/**
- ********************************************************************
- * Static local functions
- ********************************************************************
- */
-
-static M4OSA_ERR M4MCS_intStepSet( M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intPrepareVideoDecoder(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intPrepareVideoEncoder(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intPrepareAudioProcessing(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intPrepareWriter( M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intPrepareAudioBeginCut(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intStepEncoding(
-                                    M4MCS_InternalContext *pC,
-                                    M4OSA_UInt8 *pTranscodedTime );
-static M4OSA_ERR M4MCS_intStepBeginVideoJump(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intStepBeginVideoDecode(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intAudioNullEncoding( M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intAudioTranscoding( M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intVideoNullEncoding( M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intVideoTranscoding( M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intGetInputClipProperties(
-                                    M4MCS_InternalContext   *pContext );
-static M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB(
-                                    M4OSA_MemAddr8 pAudioFrame );
-static M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC(
-                                    M4OSA_MemAddr8 pAudioFrame );
-static M4OSA_ERR M4MCS_intCheckMaxFileSize( M4MCS_Context pContext );
-static M4VIDEOEDITING_Bitrate M4MCS_intGetNearestBitrate(
-                                    M4OSA_Int32 freebitrate,
-                                    M4OSA_Int8 mode );
-static M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders(
-                                    M4MCS_InternalContext *pC );
-static M4OSA_ERR M4MCS_intReallocTemporaryAU(
-                                    M4OSA_MemAddr8 *addr,
-                                    M4OSA_UInt32 newSize );
-static M4OSA_ERR M4MCS_intCheckAndGetCodecProperties(
-                                 M4MCS_InternalContext *pC);
-
-static M4OSA_ERR M4MCS_intLimitBitratePerCodecProfileLevel(
-                                 M4ENCODER_AdvancedParams* EncParams);
-static M4OSA_Int32 M4MCS_intLimitBitrateForH263Enc(M4OSA_Int32 profile,
-                                 M4OSA_Int32 level, M4OSA_Int32 bitrate);
-static M4OSA_Int32 M4MCS_intLimitBitrateForMpeg4Enc(M4OSA_Int32 profile,
-                                 M4OSA_Int32 level, M4OSA_Int32 bitrate);
-static M4OSA_Int32 M4MCS_intLimitBitrateForH264Enc(M4OSA_Int32 profile,
-                                 M4OSA_Int32 level, M4OSA_Int32 bitrate);
-
-/**
- **********************************************************************
- * External function used only by VideoEditor and that does not appear
- * in the API
- **********************************************************************
- */
-
-M4OSA_ERR M4MCS_open_normalMode( M4MCS_Context pContext,
-                                 M4OSA_Void *pFileIn,
-                                 M4VIDEOEDITING_FileType InputFileType,
-                                 M4OSA_Void *pFileOut,
-                                 M4OSA_Void *pTempFile );
-
-/* All errors are fatal in the MCS */
-#define M4ERR_CHECK_RETURN(err) if(M4NO_ERROR!=err) return err;
-
-/* A define used with SSRC 1.04 and above to avoid taking blocks smaller
- * that the minimal block size
- */
-#define M4MCS_SSRC_MINBLOCKSIZE        100
-
-static M4OSA_UChar Tab_MCS[8] =
-{
-    17, 5, 3, 3, 1, 1, 1, 1
-};
-
-M4OSA_ERR H264MCS_Getinstance( NSWAVC_MCS_t ** instance )
-{
-    NSWAVC_MCS_t *p_bs = M4OSA_NULL;
-    M4OSA_ERR err = M4NO_ERROR;
-    p_bs = (NSWAVC_MCS_t *)M4OSA_32bitAlignedMalloc(sizeof(NSWAVC_MCS_t), M4MCS,
-        (M4OSA_Char *)"NSWAVC_MCS_t");
-
-    if( M4OSA_NULL == p_bs )
-    {
-        M4OSA_TRACE1_0("H264MCS_Getinstance: allocation error");
-        return M4ERR_ALLOC;
-    }
-
-    p_bs->prev_frame_num = 0;
-    p_bs->cur_frame_num = 0;
-    p_bs->log2_max_frame_num_minus4 = 0;
-    p_bs->prev_new_frame_num = 0;
-    p_bs->is_done = 0;
-    p_bs->is_first = 1;
-
-    p_bs->m_pDecoderSpecificInfo = M4OSA_NULL;
-    p_bs->m_decoderSpecificInfoSize = 0;
-
-    p_bs->m_pEncoderSPS = M4OSA_NULL;
-    p_bs->m_encoderSPSSize = 0;
-
-    p_bs->m_pEncoderPPS = M4OSA_NULL;
-    p_bs->m_encoderPPSSize = 0;
-
-    p_bs->m_pFinalDSI = M4OSA_NULL;
-    p_bs->m_pFinalDSISize = 0;
-
-    p_bs->p_clip_sps = M4OSA_NULL;
-    p_bs->m_encoder_SPS_Cnt = 0;
-
-    p_bs->p_clip_pps = M4OSA_NULL;
-    p_bs->m_encoder_PPS_Cnt = 0;
-
-    p_bs->p_encoder_sps = M4OSA_NULL;
-    p_bs->p_encoder_pps = M4OSA_NULL;
-
-    p_bs->encoder_pps.slice_group_id = M4OSA_NULL;
-
-    *instance = (NSWAVC_MCS_t *)p_bs;
-    return err;
-}
-
-M4OSA_UInt32 H264MCS_getBits( ComBitStreamMCS_t *p_bs, M4OSA_UInt32 numBits )
-{
-    M4OSA_UInt32 ui32RetBits;
-    M4OSA_UInt8 *pbs;
-    M4OSA_Int32 bcnt;
-    p_bs->i8BitCnt -= numBits;
-    bcnt = p_bs->i8BitCnt;
-
-    /* Measure the quantity of bits to be read in ui32TempBuff */
-    ui32RetBits = p_bs->ui32TempBuff >> (32 - numBits);
-
-    /* Read numBits in ui32TempBuff */
-    p_bs->ui32TempBuff <<= numBits;
-    p_bs->bitPos += numBits;
-
-    if( bcnt > 24 )
-    {
-        return (ui32RetBits);
-    }
-    else
-    { /* at least one byte can be buffered in ui32TempBuff */
-        pbs = (M4OSA_UInt8 *)p_bs->pui8BfrPtr;
-
-        if( bcnt < (int)(p_bs->numBitsInBuffer - p_bs->bitPos) )
-        { /* not enough remaining bits in ui32TempBuff: need to be filled */
-            do
-            {
-                /* On the fly detection of EPB byte */
-                if( ( *(pbs) == 0x03)
-                    && (!(( pbs[-1])
-                    | (pbs[-2])))) //(p_bs->ui32LastTwoBytes & 0x0000FFFF) == 0)
-                {
-                    /* EPB byte found: skip it and update bitPos accordingly */
-                            (pbs)++;
-                            p_bs->bitPos += 8;
-                        }
-
-                        p_bs->ui32TempBuff |= *(pbs)++ << (24 - bcnt);
-                        bcnt += 8;
-            } while ( bcnt <= 24 );
-
-            p_bs->pui8BfrPtr = (M4OSA_Int8 *)pbs;
-            p_bs->i8BitCnt = bcnt;
-            return (ui32RetBits);
-        }
-    }
-
-    if( p_bs->bitPos <= p_bs->numBitsInBuffer )
-    {
-        return (ui32RetBits);
-    }
-    else
-    {
-        return (0);
-    }
-}
-
-M4OSA_Void H264MCS_flushBits( ComBitStreamMCS_t *p_bs, M4OSA_UInt32 numBits )
-{
-    M4OSA_UInt8 *pbs;
-    M4OSA_UInt32 bcnt;
-    p_bs->i8BitCnt -= numBits;
-    bcnt = p_bs->i8BitCnt;
-
-    p_bs->ui32TempBuff <<= numBits;
-    p_bs->bitPos += numBits;
-
-    if( bcnt > 24 )
-    {
-        return;
-    }
-    else
-    { /* at least one byte can be buffered in ui32TempBuff */
-        pbs = (M4OSA_UInt8 *)p_bs->pui8BfrPtr;
-
-        if( bcnt < (p_bs->numBitsInBuffer - p_bs->bitPos) )
-        {   /* Not enough remaining bits in ui32TempBuff: need to be filled */
-            do
-            {
-                /*  On the fly detection of EPB byte */
-                if( ( *(pbs) == 0x03) && (!(( pbs[-1]) | (pbs[-2]))) )
-                { /* JC: EPB byte found: skip it and update bitPos accordingly */
-                    (pbs)++;
-                    p_bs->bitPos += 8;
-                }
-                p_bs->ui32TempBuff |= *(pbs)++ << (24 - bcnt);
-                bcnt += 8;
-            } while ( bcnt <= 24 );
-
-            p_bs->pui8BfrPtr = (M4OSA_Int8 *)pbs;
-            p_bs->i8BitCnt = bcnt;
-        }
-    }
-
-    return;
-}
-
-M4OSA_UInt32 H264MCS_DecVLCReadExpGolombCode( ComBitStreamMCS_t *p_bs )
-{
-    M4OSA_UInt32 code, l0 = 0, l1;
-    /* Reading 32 Bits from local cache buffer of Bitstream structure*/
-    code = p_bs->ui32TempBuff;
-
-    /* Checking in first 3 bits*/
-    if( code >> 29 )
-    {
-        l0 = Tab_MCS[(code >> 29)];
-        code = code >> (32 - l0);
-        H264MCS_flushBits(p_bs, l0);
-    }
-    else
-        {
-            if( code )
-            {
-                code <<= 3;
-
-                for ( l0 = 3; code < 0x80000000; code <<= 1, l0++ );
-
-                if( l0 < 16 ) /*all useful bits are inside the 32 bits read */
-                {
-                    code = code >> (31 - l0);
-                    H264MCS_flushBits(p_bs, 2 * l0 + 1);
-                }
-                else
-            { /* Read the useful bits in 2 parts */
-                    l1 = ( l0 << 1) - 31;
-                    code >>= l0;
-                    H264MCS_flushBits(p_bs, 32);
-                    code = ( code << l1) | H264MCS_getBits(p_bs, l1);
-                }
-            }
-            else
-            {
-                H264MCS_flushBits(p_bs, 32);
-
-                if( H264MCS_getBits(p_bs, 1) )
-                {
-                    /* if number of leading 0's is 32, the only code allowed is 1 followed
-                    by 32 0's */
-
-                    /*reading 32 more bits from bitstream buffer*/
-                    code = H264MCS_getBits(p_bs, 32);
-
-                    if( code == 0 )
-                    {
-                        return (code - 1);
-                    }
-                }
-                /*if number of leading 0's is >32, then symbol is >32 bits,
-                which is an error */
-                //p_bs->state = _BS_ERR;
-                //p_bs->flags |= _BF_SYM_ERR;
-                return (0);
-            }
-        }
-
-        if( 1 ) //(p_bs->state == _BS_OK)
-        {
-            return (code - 1);
-        }
-        else
-        {
-            return (0);
-        }
-    }
-
-M4OSA_Int32 H264MCS_DecVLCReadSignedExpGolombCode( ComBitStreamMCS_t *p_bs )
-{
-    M4OSA_Int32 codeNo, ret;
-
-    /* read the unsigned code number */
-    codeNo = H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-    /* map to the signed value, if value is odd then it's positive,
-    if even then it's negative, formula is (-1)^(k+1)*CEIL(k/2) */
-
-    ret = (codeNo & 0x01) ? (( codeNo + 1) >> 1) : (( -codeNo) >> 1);
-
-    return ret;
-}
-
-M4OSA_Void DecBitStreamReset_MCS( ComBitStreamMCS_t *p_bs,
-                                 M4OSA_UInt32 bytes_read )
-{
-    p_bs->bitPos = 0;
-
-    p_bs->lastTotalBits = 0;
-    p_bs->numBitsInBuffer = bytes_read << 3;
-    p_bs->readableBytesInBuffer = bytes_read;
-    //p_bs->state = M4NO_ERROR;//_BS_OK;
-    //p_bs->flags = 0;
-
-    p_bs->ui32TempBuff = 0;
-    p_bs->i8BitCnt = 0;
-    p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
-    p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
-    H264MCS_getBits(p_bs, 0);
-}
-
-M4OSA_ERR NSWAVCMCS_initBitstream( NSWAVC_bitStream_t_MCS *bS )
-{
-    bS->bitPos = 0;
-    bS->byteCnt = 0;
-    bS->currBuff = 0;
-    bS->prevByte = 0xff;
-    bS->prevPrevByte = 0xff;
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR NSWAVCMCS_putBits( NSWAVC_bitStream_t_MCS *bS, M4OSA_UInt32 value,
-                            M4OSA_UInt8 length )
-{
-    M4OSA_UInt32 maskedValue = 0, temp = 0;
-    M4OSA_UInt8 byteOne;
-
-    M4OSA_UInt32 len1 = (length == 32) ? 31 : length;
-
-    if( !(length) )
-    {
-        /* Length = 0, return OK*/
-        return M4NO_ERROR;
-    }
-
-    maskedValue = (M4OSA_UInt32)(value &(( 1 << len1) - 1));
-
-    if( 32 > (length + bS->bitPos) )
-    {
-        bS->bitPos += length;
-        bS->currBuff |= maskedValue << (32 - bS->bitPos);
-    }
-    else
-    {
-        temp = (( bS->bitPos + length) - 32);
-
-        bS->currBuff |= (maskedValue >> (temp));
-
-        byteOne =
-            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        byteOne = bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        byteOne = bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        byteOne = bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)((bS->currBuff) &0xff);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-
-        bS->currBuff = 0;
-
-        bS->currBuff |= ( maskedValue &(( 1 << temp) - 1)) << (32 - temp);
-
-        bS->bitPos = temp;
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR NSWAVCMCS_putBit( NSWAVC_bitStream_t_MCS *bS, M4OSA_UInt32 value )
-{
-    M4OSA_UInt32 maskedValue = 0, temp = 0;
-    M4OSA_UInt8 byteOne;
-
-    maskedValue = (value ? 1 : 0);
-
-    if( 32 > (1 + bS->bitPos) )
-    {
-        bS->bitPos += 1;
-        bS->currBuff |= maskedValue << (32 - bS->bitPos);
-    }
-    else
-    {
-        temp = 0;
-
-        bS->currBuff |= (maskedValue);
-
-        /* writing it to memory*/
-        byteOne =
-            bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)(bS->currBuff >> 24);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        byteOne = bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        byteOne = bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        byteOne = bS->streamBuffer[bS->byteCnt++] =
-            (M4OSA_UInt8)((bS->currBuff) &0xff);
-
-        if( (( bS->prevPrevByte
-            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
-        {
-            bS->byteCnt -= 1;
-            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
-            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
-        }
-        else
-        {
-            bS->prevPrevByte = bS->prevByte;
-            bS->prevByte = byteOne;
-        }
-        bS->currBuff = 0;
-        bS->bitPos = 0;
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_Int32 NSWAVCMCS_putRbspTbits( NSWAVC_bitStream_t_MCS *bS )
-{
-    M4OSA_UInt8 trailBits = 0;
-    M4OSA_UInt8 byteCnt = 0;
-
-    trailBits = (M4OSA_UInt8)(bS->bitPos % 8);
-
-    /* Already in the byte aligned position,
-    RBSP trailing bits will be 1000 0000 */
-    if( 0 == trailBits )
-    {
-        trailBits = (1 << 7);
-        NSWAVCMCS_putBits(bS, trailBits, 8);
-    }
-    else
-    {
-        trailBits = (8 - trailBits);
-        NSWAVCMCS_putBit(bS, 1);
-        trailBits--;
-
-        if( trailBits )
-        { /* put trailBits times zeros */
-            NSWAVCMCS_putBits(bS, 0, trailBits);
-        }
-    }
-
-    /* For writting the currBuff in streamBuff 4byte alignment is required*/
-    byteCnt = (M4OSA_UInt8)(( bS->bitPos + 4) / 8);
-
-    switch( byteCnt )
-    {
-        case 1:
-            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
-            break;
-
-        case 2:
-            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
-            bS->streamBuffer[bS->byteCnt++] =
-                (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
-            break;
-
-        case 3:
-            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
-            bS->streamBuffer[bS->byteCnt++] =
-                (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
-            bS->streamBuffer[bS->byteCnt++] =
-                (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
-
-            break;
-
-        default:
-            /* It will not come here */
-            break;
-    }
-
-    //    bS->bitPos =0;
-    //    bS->currBuff = 0;
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR NSWAVCMCS_uExpVLC( NSWAVC_bitStream_t_MCS *bS, M4OSA_Int32 codeNum )
-{
-
-    M4OSA_Int32 loop, temp;
-    M4OSA_Int32 data = 0;
-    M4OSA_UInt8 codeLen = 0;
-
-    /* The codeNum cannot be less than zero for this ue(v) */
-    if( codeNum < 0 )
-    {
-        return 0;
-    }
-
-    /* Implementation for Encoding of the Table 9-1 in the Standard */
-    temp = codeNum + 1;
-
-    for ( loop = 0; temp != 0; loop++ )
-    {
-        temp /= 2;
-    }
-
-    codeLen = (( loop * 2) - 1);
-
-    data = codeNum + 1;
-
-    NSWAVCMCS_putBits(bS, data, codeLen);
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR NSWAVCMCS_sExpVLC( NSWAVC_bitStream_t_MCS *bS, M4OSA_Int32 codeNum )
-{
-
-    M4OSA_Int32 loop, temp1, temp2;
-    M4OSA_Int32 data = 0;
-    M4OSA_UInt8 codeLen = 0, isPositive = 0;
-    M4OSA_UInt32 abscodeNum;
-
-    if( codeNum > 0 )
-    {
-        isPositive = 1;
-    }
-
-    if( codeNum > 0 )
-    {
-        abscodeNum = codeNum;
-    }
-    else
-    {
-        abscodeNum = -codeNum;
-    }
-
-    temp1 = ( ( ( abscodeNum) << 1) - isPositive) + 1;
-    temp2 = temp1;
-
-    for ( loop = 0; loop < 16 && temp2 != 0; loop++ )
-    {
-        temp2 /= 2;
-    }
-
-    codeLen = ( loop * 2) - 1;
-
-    data = temp1;
-
-    NSWAVCMCS_putBits(bS, data, codeLen);
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR H264MCS_ProcessEncodedNALU(   M4OSA_Void *ainstance,
-                                        M4OSA_UInt8 *inbuff,
-                                        M4OSA_Int32 inbuf_size,
-                                        M4OSA_UInt8 *outbuff,
-                                        M4OSA_Int32 *outbuf_size )
-{
-    ComBitStreamMCS_t *p_bs, bs;
-    NSWAVC_MCS_t *instance;
-    M4OSA_UInt8 nalu_info;
-    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
-    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id, frame_num;
-    M4OSA_Int32 seq_parameter_set_id;
-    M4OSA_UInt8 temp1, temp2, temp3, temp4;
-    M4OSA_Int32 temp_frame_num;
-    M4OSA_Int32 bitstoDiacard, bytes;
-    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
-    M4OSA_Int32 new_bytes, init_bit_pos;
-    M4OSA_UInt32 nal_size;
-    M4OSA_UInt32 cnt;
-    M4OSA_UInt32 outbuffpos = 0;
-    M4OSA_UInt32 nal_size_low16, nal_size_high16;
-    M4OSA_UInt32 frame_size = 0;
-    M4OSA_UInt32 temp = 0;
-
-    // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
-    M4OSA_Int8 *pTmpBuff1 = M4OSA_NULL;
-    M4OSA_Int8 *pTmpBuff2 = M4OSA_NULL;
-
-    p_bs = &bs;
-    instance = (NSWAVC_MCS_t *)ainstance;
-
-    M4OSA_TRACE1_2(
-        "In  H264MCS_ProcessEncodedNALU with FrameSize = %d  inBuf_Size=%d",
-        frame_size, inbuf_size);
-
-    // StageFright codecs may add a start code, make sure it is not present
-
-    if( !memcmp((void *)inbuff,
-        "\x00\x00\x00\x01", 4) )
-    {
-        M4OSA_TRACE1_3(
-            "H264MCS_ProcessNALU ERROR : NALU start code has not been removed %d "
-            "0x%X 0x%X", inbuf_size, ((M4OSA_UInt32 *)inbuff)[0],
-            ((M4OSA_UInt32 *)inbuff)[1]);
-
-        return M4ERR_PARAMETER;
-    }
-
-    // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
-    pTmpBuff1 = (M4OSA_Int8 *)M4OSA_32bitAlignedMalloc(inbuf_size + 4, M4MCS,
-        (M4OSA_Char *)"tmpNALU");
-    memcpy((void *)(pTmpBuff1 + 4), (void *)inbuff,
-        inbuf_size);
-    pTmpBuff1[3] = ( (M4OSA_UInt32)inbuf_size) & 0x000000FF;
-    pTmpBuff1[2] = ( (M4OSA_UInt32)inbuf_size >> 8) & 0x000000FF;
-    pTmpBuff1[1] = ( (M4OSA_UInt32)inbuf_size >> 16) & 0x000000FF;
-    pTmpBuff1[0] = ( (M4OSA_UInt32)inbuf_size >> 24) & 0x000000FF;
-    pTmpBuff2 = (M4OSA_Int8 *)inbuff;
-    inbuff = (M4OSA_UInt8 *)pTmpBuff1;
-    inbuf_size += 4;
-
-    // Make sure the available size was set
-    if( inbuf_size >= *outbuf_size )
-    {
-        M4OSA_TRACE1_1(
-            "!!! H264MCS_ProcessNALU ERROR : specified available size is incorrect %d ",
-            *outbuf_size);
-        return M4ERR_PARAMETER;
-    }
-
-
-
-    while( (M4OSA_Int32)frame_size < inbuf_size )
-    {
-        mask_bits = 0xFFFFFFFF;
-        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
-
-        // Use unsigned value to fix errors due to bit sign extension, this fix should be generic
-
-        nal_size_high16 = ( ( (M4OSA_UInt8 *)p_bs->Buffer)[0] << 8)
-            + ((M4OSA_UInt8 *)p_bs->Buffer)[1];
-        nal_size_low16 = ( ( (M4OSA_UInt8 *)p_bs->Buffer)[2] << 8)
-            + ((M4OSA_UInt8 *)p_bs->Buffer)[3];
-
-        nalu_info = (unsigned char)p_bs->Buffer[4];
-
-        outbuff[outbuffpos] = p_bs->Buffer[4];
-
-        p_bs->Buffer = p_bs->Buffer + 5;
-
-        p_bs->bitPos = 0;
-        p_bs->lastTotalBits = 0;
-        p_bs->numBitsInBuffer = ( inbuf_size - frame_size - 5) << 3;
-        p_bs->readableBytesInBuffer = inbuf_size - frame_size - 5;
-
-        p_bs->ui32TempBuff = 0;
-        p_bs->i8BitCnt = 0;
-        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
-        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
-
-        H264MCS_getBits(p_bs, 0);
-
-        nal_size = ( nal_size_high16 << 16) + nal_size_low16;
-
-        frame_size += nal_size + 4;
-
-        forbidden_bit = ( nalu_info >> 7) & 1;
-        nal_ref_idc = ( nalu_info >> 5) & 3;
-        nal_unit_type = (nalu_info) &0x1f;
-
-        NSWAVCMCS_initBitstream(&instance->encbs);
-
-        instance->encbs.streamBuffer = outbuff + outbuffpos + 1;
-
-        if( nal_unit_type == 8 )
-        {
-            M4OSA_TRACE1_0("Error : PPS");
-            return 0;
-        }
-
-        if( nal_unit_type == 7 )
-        {
-            /*SPS Packet */
-            M4OSA_TRACE1_0("Error : SPS");
-            return 0;
-        }
-
-        if( (nal_unit_type == 5) )
-        {
-            instance->frame_count = 0;
-            instance->POC_lsb = 0;
-        }
-
-        if( ( nal_unit_type == 1) || (nal_unit_type == 5) )
-        {
-            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-            /* First MB in slice */
-            NSWAVCMCS_uExpVLC(&instance->encbs, first_mb_in_slice);
-
-            /* Slice Type */
-            NSWAVCMCS_uExpVLC(&instance->encbs, slice_type);
-
-            /* Picture Parameter set Id */
-            pic_parameter_set_id = instance->encoder_pps.pic_parameter_set_id;
-            NSWAVCMCS_uExpVLC(&instance->encbs, pic_parameter_set_id);
-
-            temp = H264MCS_getBits(p_bs,
-                instance->encoder_sps.log2_max_frame_num_minus4 + 4);
-            NSWAVCMCS_putBits(&instance->encbs, instance->frame_count,
-                instance->clip_sps.log2_max_frame_num_minus4 + 4);
-
-            // In Baseline Profile: frame_mbs_only_flag should be ON
-            if( nal_unit_type == 5 )
-            {
-                temp = H264MCS_DecVLCReadExpGolombCode(p_bs);
-                NSWAVCMCS_uExpVLC(&instance->encbs, temp);
-            }
-
-            if( instance->encoder_sps.pic_order_cnt_type == 0 )
-            {
-                temp = H264MCS_getBits(p_bs,
-                    instance->encoder_sps.log2_max_pic_order_cnt_lsb_minus4
-                    + 4);
-
-                // in baseline profile field_pic_flag should be off.
-                if( instance->encoder_pps.pic_order_present_flag )
-                {
-                    temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-                }
-            }
-
-            if( ( instance->encoder_sps.pic_order_cnt_type == 1)
-                && (instance->encoder_sps.delta_pic_order_always_zero_flag) )
-            {
-                temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-
-                // in baseline profile field_pic_flag should be off.
-                if( instance->encoder_pps.pic_order_present_flag )
-                {
-                    temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-                }
-            }
-
-            if( instance->clip_sps.pic_order_cnt_type == 0 )
-            {
-                NSWAVCMCS_putBits(&instance->encbs, instance->POC_lsb,
-                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
-
-                // in baseline profile field_pic_flag should be off.
-                if( instance->encoder_pps.pic_order_present_flag )
-                {
-                    NSWAVCMCS_sExpVLC(&instance->encbs, 0);
-                }
-            }
-
-            if( ( instance->clip_sps.pic_order_cnt_type == 1)
-                && (instance->clip_sps.delta_pic_order_always_zero_flag) )
-            {
-                NSWAVCMCS_sExpVLC(&instance->encbs, 0);
-
-                // in baseline profile field_pic_flag should be off.
-                if( instance->encoder_pps.pic_order_present_flag )
-                {
-                    NSWAVCMCS_sExpVLC(&instance->encbs, 0);
-                }
-            }
-
-            cnt = p_bs->bitPos & 0x7;
-
-            if( cnt )
-            {
-                cnt = 8 - cnt;
-                temp = H264MCS_getBits(p_bs, cnt);
-                NSWAVCMCS_putBits(&instance->encbs, temp, cnt);
-            }
-
-            cnt = p_bs->bitPos >> 3;
-
-            while( cnt < (nal_size - 2) )
-            {
-                temp = H264MCS_getBits(p_bs, 8);
-                NSWAVCMCS_putBits(&instance->encbs, temp, 8);
-                cnt = p_bs->bitPos >> 3;
-            }
-
-            temp = H264MCS_getBits(p_bs, 8);
-
-            if( temp != 0 )
-            {
-                cnt = 0;
-
-                while( ( temp & 0x1) == 0 )
-                {
-                    cnt++;
-                    temp = temp >> 1;
-                }
-                cnt++;
-                temp = temp >> 1;
-
-                if( 8 - cnt )
-                {
-                    NSWAVCMCS_putBits(&instance->encbs, temp, (8 - cnt));
-                }
-
-                NSWAVCMCS_putRbspTbits(&instance->encbs);
-            }
-            else
-            {
-
-                M4OSA_TRACE1_1(
-                    "H264MCS_ProcessEncodedNALU : 13 temp = 0 trailing bits = %d",
-                    instance->encbs.bitPos % 8);
-
-                if( instance->encbs.bitPos % 8 )
-                {
-                    NSWAVCMCS_putBits(&instance->encbs, 0,
-                        (8 - instance->encbs.bitPos % 8));
-                }
-            }
-
-            temp = instance->encbs.byteCnt;
-            temp = temp + 1;
-
-            outbuffpos = outbuffpos + temp;
-        }
-    }
-
-    *outbuf_size = outbuffpos;
-
-    instance->POC_lsb = instance->POC_lsb + 1;
-
-    if( instance->POC_lsb == instance->POC_lsb_mod )
-    {
-        instance->POC_lsb = 0;
-    }
-    instance->frame_count = instance->frame_count + 1;
-
-    if( instance->frame_count == instance->frame_mod_count )
-    {
-        instance->frame_count = 0;
-    }
-
-    // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
-
-    free(pTmpBuff1);
-    pTmpBuff1 = M4OSA_NULL;
-    inbuff = (M4OSA_UInt8 *)pTmpBuff2;
-
-    return M4NO_ERROR;
-}
-
-M4OSA_Int32 DecSPSMCS( ComBitStreamMCS_t *p_bs,
-                      ComSequenceParameterSet_t_MCS *sps )
-{
-    M4OSA_UInt32 i;
-    M4OSA_Int32 temp_max_dpb_size;
-    M4OSA_Int32 nb_ignore_bits;
-    M4OSA_Int32 error;
-    M4OSA_UInt8 profile_idc, level_idc, reserved_zero_4bits,
-        seq_parameter_set_id;
-    M4OSA_UInt8 constraint_set0_flag, constraint_set1_flag,
-        constraint_set2_flag, constraint_set3_flag;
-
-    sps->profile_idc = (M4OSA_UInt8)H264MCS_getBits(p_bs, 8);
-    sps->constraint_set0_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    sps->constraint_set1_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    sps->constraint_set2_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    sps->constraint_set3_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    reserved_zero_4bits = (M4OSA_UInt8)H264MCS_getBits(p_bs, 4);
-    sps->level_idc = (M4OSA_UInt8)H264MCS_getBits(p_bs, 8);
-    sps->seq_parameter_set_id =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-    sps->log2_max_frame_num_minus4 =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-    sps->MaxFrameNum = 1 << (sps->log2_max_frame_num_minus4 + 4);
-    sps->pic_order_cnt_type =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-    if (sps->pic_order_cnt_type == 0)
-    {
-        sps->log2_max_pic_order_cnt_lsb_minus4 =
-            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-        sps->MaxPicOrderCntLsb =
-            1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
-    }
-    else if( sps->pic_order_cnt_type == 1 )
-    {
-        sps->delta_pic_order_always_zero_flag =
-            (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-        // This fix should be generic to remove codec dependency
-
-        sps->offset_for_non_ref_pic =
-            H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-        sps->offset_for_top_to_bottom_field =
-            H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-
-
-        /*num_ref_frames_in_pic_order_cnt_cycle must be in the range 0, 255*/
-
-        sps->num_ref_frames_in_pic_order_cnt_cycle =
-            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-        /* compute deltaPOC */
-        sps->expectedDeltaPerPicOrderCntCycle = 0;
-
-        for ( i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++ )
-        {
-            // This fix should be generic to remove codec dependency
-            sps->offset_for_ref_frame[i] =
-                H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-
-            sps->expectedDeltaPerPicOrderCntCycle +=
-                sps->offset_for_ref_frame[i];
-        }
-    }
-
-    /* num_ref_frames must be in the range 0,16 */
-    sps->num_ref_frames = (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-    sps->gaps_in_frame_num_value_allowed_flag =
-        (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    sps->pic_width_in_mbs_minus1 =
-        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-    sps->pic_height_in_map_units_minus1 =
-        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-    sps->frame_mbs_only_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    if (!sps->frame_mbs_only_flag)
-    {
-        sps->mb_adaptive_frame_field_flag =
-            (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    }
-    else
-    {
-        sps->mb_adaptive_frame_field_flag = 0;
-    }
-
-    sps->PicWidthInMbs = sps->pic_width_in_mbs_minus1 + 1;
-    sps->FrameHeightInMbs = ( 2 - sps->frame_mbs_only_flag) * \
-        (sps->pic_height_in_map_units_minus1 + 1);
-#ifdef _CAP_FMO_
-
-    sps->NumSliceGroupMapUnits =
-        sps->PicWidthInMbs * (sps->pic_height_in_map_units_minus1 + 1);
-    sps->MaxPicSizeInMbs = sps->PicWidthInMbs * sps->FrameHeightInMbs;
-
-#endif /*_CAP_FMO_*/
-
-    sps->direct_8x8_inference_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    if( sps->frame_mbs_only_flag == 0 )
-        sps->direct_8x8_inference_flag = 1;
-
-    sps->frame_cropping_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    if( sps->frame_cropping_flag )
-    {
-        sps->frame_crop_left_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
-        sps->frame_crop_right_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
-        sps->frame_crop_top_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
-        sps->frame_crop_bottom_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
-    }
-    else
-    {
-        sps->frame_crop_left_offset = 0;
-        sps->frame_crop_right_offset = 0;
-        sps->frame_crop_top_offset = 0;
-        sps->frame_crop_bottom_offset = 0;
-    }
-
-    sps->vui_parameters_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    if (sps->vui_parameters_present_flag) {
-        /* no error message as stream can be decoded without VUI messages */
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_Int32 DecPPSMCS( ComBitStreamMCS_t *p_bs,
-                      ComPictureParameterSet_t_MCS *pps )
-{
-    M4OSA_Int32 error;
-    M4OSA_UInt32 pic_parameter_set_id;
-
-#ifdef _CAP_FMO_
-    M4OSA_UInt32 i, length, v;
-#endif
-
-    M4OSA_Int32 nb_ignore_bits;
-
-    pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
-    pps->pic_parameter_set_id = (M4OSA_UInt8)pic_parameter_set_id;
-
-    pps->seq_parameter_set_id =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-    /* entropy_coding_mode_flag must be 0 or 1 */
-    pps->entropy_coding_mode_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    pps->pic_order_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    pps->num_slice_groups_minus1 =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-#ifdef _CAP_FMO_
-    /* FMO stuff begins here */
-
-    pps->map_initialized = FALSE;
-
-    if( pps->num_slice_groups_minus1 > 0 )
-    {
-        pps->slice_group_map_type =
-            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-        switch( pps->slice_group_map_type )
-        {
-            case 0:
-                for ( i = 0; i <= pps->num_slice_groups_minus1; i++ )
-                {
-                    pps->run_length_minus1[i] =
-                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-                }
-                break;
-
-            case 2:
-                for ( i = 0; i < pps->num_slice_groups_minus1; i++ )
-                {
-                    pps->top_left[i] =
-                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-                    pps->bottom_right[i] =
-                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-                }
-                break;
-
-            case 3:
-            case 4:
-            case 5:
-                pps->slice_group_change_direction_flag =
-                    (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-                pps->slice_group_change_rate_minus1 =
-                    (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-                break;
-
-            case 6:
-                pps->pic_size_in_map_units_minus1 =
-                    (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-                pps->slice_group_id = (H264UInt8
-                    *)M4H264Dec_malloc((pps->pic_size_in_map_units_minus1
-                    + 1), M4H264_COREID, (M4OSA_Char *)"PPS");
-
-                if (M4OSA_NULL == pps->slice_group_id)
-                {
-                    M4OSA_TRACE1_0("DecPPSMCS: allocation error");
-                    return M4ERR_ALLOC;
-                }
-
-                for ( length = 0, v = pps->num_slice_groups_minus1 + 1; v != 0;
-                    v >>= 1, length++ );
-
-                    for ( i = 0; i <= pps->pic_size_in_map_units_minus1; i++ )
-                    {
-                        pps->slice_group_id[i] =
-                            (M4OSA_UInt8)getBits(p_vlc_engine->p_bs, length);
-                    }
-                    break;
-        }
-    }
-    else
-    {
-        pps->slice_group_map_type = 0;
-    }
-    /* End of FMO stuff */
-
-#else
-
-#endif /* _CAP_FMO_ */
-
-    /* num_ref_idx_l0_active_minus1 must be in the range 0, 31 */
-
-    pps->num_ref_idx_l0_active_minus1 =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-    /* num_ref_idx_l1_active_minus1 must be in the range 0, 31 */
-    pps->num_ref_idx_l1_active_minus1 =
-        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
-    pps->weighted_pred_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    /* weighted_bipred_idc must be in the range 0,2 */
-    pps->weighted_bipred_idc = (M4OSA_Bool)H264MCS_getBits(p_bs, 2);
-
-    /* pic_init_qp_minus26 must be in the range -26,25 */
-    pps->pic_init_qp_minus26 =
-        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-
-    /* pic_init_qs_minus26 must be in the range -26,25 */
-    pps->pic_init_qs_minus26 =
-        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-
-    /* chroma_qp_index_offset must be in the range -12,+12 */
-    pps->chroma_qp_index_offset =
-        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-    pps->deblocking_filter_control_present_flag =
-        (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    pps->constrained_intra_pred_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-    pps->redundant_pic_cnt_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR H264MCS_ProcessSPS_PPS( NSWAVC_MCS_t *instance, M4OSA_UInt8 *inbuff,
-                                 M4OSA_Int32 inbuf_size )
-{
-    ComBitStreamMCS_t *p_bs, bs;
-    ComBitStreamMCS_t *p_bs1, bs1;
-
-    M4OSA_UInt8 nalu_info = 0;
-    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
-    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id = 0,
-        frame_num;
-    M4OSA_Int32 seq_parameter_set_id;
-    M4OSA_UInt8 temp1, temp2, temp3, temp4;
-    M4OSA_Int32 temp_frame_num;
-    M4OSA_Int32 bitstoDiacard, bytes;
-    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
-    M4OSA_Int32 new_bytes, init_bit_pos;
-    M4OSA_UInt32 nal_size = 0;
-    M4OSA_UInt32 cnt, cnt1;
-    M4OSA_UInt32 outbuffpos = 0;
-    M4OSA_UInt32 nal_size_low16, nal_size_high16;
-    M4OSA_UInt32 frame_size = 0;
-    M4OSA_UInt32 temp = 0;
-    M4OSA_UInt8 *lClipDSI;
-    M4OSA_UInt8 *lClipDSI_PPS_start;
-    M4OSA_UInt32 lClipDSI_PPS_offset = 0;
-
-    M4OSA_UInt8 *lPPS_Buffer = M4OSA_NULL;
-    M4OSA_UInt32 lPPS_Buffer_Size = 0;
-
-    M4OSA_UInt32 lSize, lSize1;
-    M4OSA_UInt32 lActiveSPSID_Clip;
-    M4OSA_UInt32 lClipPPSRemBits = 0;
-
-    M4OSA_UInt32 lEncoder_SPSID = 0;
-    M4OSA_UInt32 lEncoder_PPSID = 0;
-    M4OSA_UInt32 lEncoderPPSRemBits = 0;
-    M4OSA_UInt32 lFound = 0;
-    M4OSA_UInt32 size;
-
-    M4OSA_UInt8 Clip_SPSID[32] = { 0 };
-    M4OSA_UInt8 Clip_UsedSPSID[32] = { 0 };
-    M4OSA_UInt8 Clip_PPSID[256] = { 0 };
-    M4OSA_UInt8 Clip_SPSID_in_PPS[256] = { 0 };
-    M4OSA_UInt8 Clip_UsedPPSID[256] = { 0 };
-    M4OSA_ERR err = M4NO_ERROR;
-
-    p_bs = &bs;
-    p_bs1 = &bs1;
-
-    /* Find the active SPS ID */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
-        "H264MCS_ProcessSPS_PPS: instance is M4OSA_NULL");
-
-    instance->m_Num_Bytes_NALUnitLength =
-            (instance->m_pDecoderSpecificInfo[4] & 0x03) + 1;
-
-    instance->m_encoder_SPS_Cnt = instance->m_pDecoderSpecificInfo[5] & 0x1F;
-
-    lClipDSI = instance->m_pDecoderSpecificInfo + 6;
-
-    lClipDSI_PPS_offset = 6;
-
-    for ( cnt = 0; cnt < instance->m_encoder_SPS_Cnt; cnt++ )
-    {
-        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
-        lClipDSI = lClipDSI + 2;
-
-        p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 4);
-        DecBitStreamReset_MCS(p_bs, lSize - 4);
-
-        Clip_SPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
-        Clip_UsedSPSID[Clip_SPSID[cnt]] = 1;
-
-        lClipDSI = lClipDSI + lSize;
-        lClipDSI_PPS_offset = lClipDSI_PPS_offset + 2 + lSize;
-    }
-
-    instance->m_encoder_PPS_Cnt = lClipDSI[0];
-    lClipDSI = lClipDSI + 1;
-
-    lClipDSI_PPS_start = lClipDSI;
-
-    for ( cnt = 0; cnt < instance->m_encoder_PPS_Cnt; cnt++ )
-    {
-        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
-        lClipDSI = lClipDSI + 2;
-
-        p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
-        DecBitStreamReset_MCS(p_bs, lSize - 1);
-
-        Clip_PPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
-        Clip_UsedPPSID[Clip_PPSID[cnt]] = 1;
-        Clip_SPSID_in_PPS[Clip_PPSID[cnt]] =
-            H264MCS_DecVLCReadExpGolombCode(p_bs);
-
-        lClipDSI = lClipDSI + lSize;
-    }
-
-    /* Find the clip SPS ID used at the cut start frame */
-    while( ( (M4OSA_Int32)frame_size) < inbuf_size )
-    {
-        mask_bits = 0xFFFFFFFF;
-        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
-
-        switch( instance->m_Num_Bytes_NALUnitLength )
-        {
-            case 1:
-                nal_size = (unsigned char)p_bs->Buffer[0];
-                nalu_info = (unsigned char)p_bs->Buffer[1];
-                p_bs->Buffer = p_bs->Buffer + 2;
-
-                break;
-
-            case 2:
-                nal_size_high16 = ( p_bs->Buffer[0] << 8) + p_bs->Buffer[1];
-                nal_size = nal_size_high16;
-                nalu_info = (unsigned char)p_bs->Buffer[2];
-                p_bs->Buffer = p_bs->Buffer + 3;
-
-                break;
-
-            case 4:
-                nal_size_high16 = ( p_bs->Buffer[0] << 8) + p_bs->Buffer[1];
-                nal_size_low16 = ( p_bs->Buffer[2] << 8) + p_bs->Buffer[3];
-                nal_size = ( nal_size_high16 << 16) + nal_size_low16;
-                nalu_info = (unsigned char)p_bs->Buffer[4];
-                p_bs->Buffer = p_bs->Buffer + 5;
-
-                break;
-        }
-
-        if (nal_size == 0) {
-            M4OSA_TRACE1_1("0 size nal unit at line %d", __LINE__);
-            frame_size += instance->m_Num_Bytes_NALUnitLength;
-            continue;
-        }
-
-        p_bs->bitPos = 0;
-        p_bs->lastTotalBits = 0;
-        p_bs->numBitsInBuffer =
-            ( inbuf_size - frame_size - instance->m_Num_Bytes_NALUnitLength - 1)
-            << 3;
-        p_bs->readableBytesInBuffer =
-            inbuf_size - frame_size - instance->m_Num_Bytes_NALUnitLength - 1;
-
-        p_bs->ui32TempBuff = 0;
-        p_bs->i8BitCnt = 0;
-        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
-        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
-
-        H264MCS_getBits(p_bs, 0);
-
-        frame_size += nal_size + instance->m_Num_Bytes_NALUnitLength;
-
-        forbidden_bit = ( nalu_info >> 7) & 1;
-        nal_ref_idc = ( nalu_info >> 5) & 3;
-        nal_unit_type = (nalu_info) &0x1f;
-
-        if( nal_unit_type == 8 )
-        {
-            M4OSA_TRACE1_0("H264MCS_ProcessSPS_PPS() Error: PPS");
-            return err;
-        }
-
-        if( nal_unit_type == 7 )
-        {
-            /*SPS Packet */
-            M4OSA_TRACE1_0("H264MCS_ProcessSPS_PPS() Error: SPS");
-            return err;
-        }
-
-        if( ( nal_unit_type == 1) || (nal_unit_type == 5) )
-        {
-            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            break;
-        }
-    }
-
-    lActiveSPSID_Clip = Clip_SPSID_in_PPS[pic_parameter_set_id];
-
-    instance->final_SPS_ID = lActiveSPSID_Clip;
-    /* Do we need to add encoder PPS to clip PPS */
-
-    lClipDSI = lClipDSI_PPS_start;
-
-    for ( cnt = 0; cnt < instance->m_encoder_PPS_Cnt; cnt++ )
-    {
-        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
-        lClipDSI = lClipDSI + 2;
-
-        if( lActiveSPSID_Clip == Clip_SPSID_in_PPS[Clip_PPSID[cnt]] )
-        {
-            lPPS_Buffer = lClipDSI + 1;
-            lPPS_Buffer_Size = lSize - 1;
-
-            p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
-            DecBitStreamReset_MCS(p_bs, lSize - 1);
-
-            Clip_PPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            Clip_UsedPPSID[Clip_SPSID[cnt]] = 1;
-            Clip_SPSID_in_PPS[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            lClipPPSRemBits = ( lSize - 1) << 3;
-            lClipPPSRemBits -= p_bs->bitPos;
-
-            temp = lClipDSI[lSize - 1];
-
-            cnt1 = 0;
-
-            while( ( temp & 0x1) == 0 )
-            {
-                cnt1++;
-                temp = temp >> 1;
-            }
-            cnt1++;
-            lClipPPSRemBits -= cnt1;
-
-            lSize1 = instance->m_encoderPPSSize - 1;
-            p_bs1->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderPPS + 1);
-            DecBitStreamReset_MCS(p_bs1, lSize1);
-
-            lEncoder_PPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
-            lEncoder_SPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
-
-            lEncoderPPSRemBits = ( lSize1) << 3;
-            lEncoderPPSRemBits -= p_bs1->bitPos;
-
-            temp = instance->m_pEncoderPPS[lSize1];
-
-            cnt1 = 0;
-
-            while( ( temp & 0x1) == 0 )
-            {
-                cnt1++;
-                temp = temp >> 1;
-            }
-            cnt1++;
-            lEncoderPPSRemBits -= cnt1;
-
-            if( lEncoderPPSRemBits == lClipPPSRemBits )
-            {
-                while( lEncoderPPSRemBits > 8 )
-                {
-                    temp1 = H264MCS_getBits(p_bs, 8);
-                    temp2 = H264MCS_getBits(p_bs1, 8);
-                    lEncoderPPSRemBits = lEncoderPPSRemBits - 8;
-
-                    if( temp1 != temp2 )
-                    {
-                        break;
-                    }
-                }
-
-                if( lEncoderPPSRemBits < 8 )
-                {
-                    if( lEncoderPPSRemBits )
-                    {
-                        temp1 = H264MCS_getBits(p_bs, lEncoderPPSRemBits);
-                        temp2 = H264MCS_getBits(p_bs1, lEncoderPPSRemBits);
-
-                        if( temp1 == temp2 )
-                        {
-                            lFound = 1;
-                        }
-                    }
-                    else
-                    {
-                        lFound = 1;
-                    }
-                }
-                break;
-            }
-        }
-
-        lClipDSI = lClipDSI + lSize;
-    }
-
-    /* Form the final SPS and PPS data */
-
-    if( lFound == 1 )
-    {
-        /* No need to add PPS */
-        instance->final_PPS_ID = Clip_PPSID[cnt];
-
-        instance->m_pFinalDSI =
-            (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(instance->m_decoderSpecificInfoSize,
-            M4MCS, (M4OSA_Char *)"instance->m_pFinalDSI");
-
-        if( instance->m_pFinalDSI == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
-            return M4ERR_ALLOC;
-        }
-
-        instance->m_pFinalDSISize = instance->m_decoderSpecificInfoSize;
-        memcpy((void *)instance->m_pFinalDSI,
-            (void *)instance->m_pDecoderSpecificInfo,
-            instance->m_decoderSpecificInfoSize);
-    }
-    else
-    {
-        /* ADD PPS */
-        /* find the free PPS ID */
-
-        cnt = 0;
-
-        while( Clip_UsedPPSID[cnt] )
-        {
-            cnt++;
-        }
-        instance->final_PPS_ID = cnt;
-
-        size = instance->m_decoderSpecificInfoSize + instance->m_encoderPPSSize
-            + 10;
-
-        instance->m_pFinalDSI = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(size, M4MCS,
-            (M4OSA_Char *)"instance->m_pFinalDSI");
-
-        if( instance->m_pFinalDSI == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
-            return M4ERR_ALLOC;
-        }
-
-        memcpy((void *)instance->m_pFinalDSI,
-            (void *)instance->m_pDecoderSpecificInfo,
-            instance->m_decoderSpecificInfoSize);
-
-        temp = instance->m_pFinalDSI[lClipDSI_PPS_offset];
-        temp = temp + 1;
-        instance->m_pFinalDSI[lClipDSI_PPS_offset] = temp;
-
-        //temp = instance->m_pEncoderPPS[0];
-        lSize1 = instance->m_encoderPPSSize - 1;
-        p_bs1->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderPPS + 1);
-        DecBitStreamReset_MCS(p_bs1, lSize1);
-
-        lEncoder_PPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
-        lEncoder_SPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
-
-        lEncoderPPSRemBits = ( lSize1) << 3;
-        lEncoderPPSRemBits -= p_bs1->bitPos;
-
-        temp = instance->m_pEncoderPPS[lSize1];
-
-        cnt1 = 0;
-
-        while( ( temp & 0x1) == 0 )
-        {
-            cnt1++;
-            temp = temp >> 1;
-        }
-        cnt1++;
-        lEncoderPPSRemBits -= cnt1;
-
-        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 2] =
-            instance->m_pEncoderPPS[0];
-
-        NSWAVCMCS_initBitstream(&instance->encbs);
-        instance->encbs.streamBuffer =
-            &(instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 3]);
-        lPPS_Buffer = instance->encbs.streamBuffer;
-
-        NSWAVCMCS_uExpVLC(&instance->encbs, instance->final_PPS_ID);
-        NSWAVCMCS_uExpVLC(&instance->encbs, instance->final_SPS_ID);
-
-        while( lEncoderPPSRemBits > 8 )
-        {
-            temp = H264MCS_getBits(p_bs1, 8);
-            NSWAVCMCS_putBits(&instance->encbs, temp, 8);
-            lEncoderPPSRemBits = lEncoderPPSRemBits - 8;
-        }
-
-        if( lEncoderPPSRemBits )
-        {
-            temp = H264MCS_getBits(p_bs1, lEncoderPPSRemBits);
-            NSWAVCMCS_putBits(&instance->encbs, temp, lEncoderPPSRemBits);
-        }
-        NSWAVCMCS_putRbspTbits(&instance->encbs);
-
-        temp = instance->encbs.byteCnt;
-        lPPS_Buffer_Size = temp;
-        temp = temp + 1;
-
-        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize] =
-            ( temp >> 8) & 0xFF;
-        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 1] =
-            (temp) &0xFF;
-        instance->m_pFinalDSISize =
-            instance->m_decoderSpecificInfoSize + 2 + temp;
-    }
-
-    /* Decode the clip SPS */
-
-    lClipDSI = instance->m_pDecoderSpecificInfo + 6;
-
-    lClipDSI_PPS_offset = 6;
-
-    for ( cnt = 0; cnt < instance->m_encoder_SPS_Cnt; cnt++ )
-    {
-        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
-        lClipDSI = lClipDSI + 2;
-
-        if( Clip_SPSID[cnt] == instance->final_SPS_ID )
-        {
-            p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
-            DecBitStreamReset_MCS(p_bs, lSize - 1);
-
-            err = DecSPSMCS(p_bs, &instance->clip_sps);
-            if(err != M4NO_ERROR) {
-                return M4ERR_PARAMETER;
-            }
-
-            //Clip_SPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            //Clip_UsedSPSID[Clip_SPSID[cnt]] = 1;
-            break;
-        }
-
-        lClipDSI = lClipDSI + lSize;
-    }
-
-    /* Decode encoder SPS */
-    p_bs->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderSPS + 1);
-    DecBitStreamReset_MCS(p_bs, instance->m_encoderSPSSize - 1);
-    err = DecSPSMCS(p_bs, &instance->encoder_sps);
-    if(err != M4NO_ERROR) {
-        return M4ERR_PARAMETER;
-    }
-
-    if( instance->encoder_sps.num_ref_frames
-    > instance->clip_sps.num_ref_frames )
-    {
-        return 100; //not supported
-    }
-
-    p_bs->Buffer = (M4OSA_UInt8 *)lPPS_Buffer;
-    DecBitStreamReset_MCS(p_bs, lPPS_Buffer_Size);
-    DecPPSMCS(p_bs, &instance->encoder_pps);
-
-    instance->frame_count = 0;
-    instance->frame_mod_count =
-        1 << (instance->clip_sps.log2_max_frame_num_minus4 + 4);
-
-    instance->POC_lsb = 0;
-    instance->POC_lsb_mod =
-        1 << (instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR H264MCS_ProcessNALU( NSWAVC_MCS_t *ainstance, M4OSA_UInt8 *inbuff,
-                               M4OSA_Int32 inbuf_size, M4OSA_UInt8 *outbuff,
-                               M4OSA_Int32 *outbuf_size )
-{
-    ComBitStreamMCS_t *p_bs, bs;
-    NSWAVC_MCS_t *instance;
-    M4OSA_UInt8 nalu_info;
-    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
-    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id, frame_num;
-    M4OSA_Int32 seq_parameter_set_id;
-    M4OSA_UInt8 temp1, temp2, temp3, temp4;
-    M4OSA_Int32 temp_frame_num;
-    M4OSA_Int32 bitstoDiacard, bytes;
-    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
-    M4OSA_Int32 new_bytes, init_bit_pos;
-    M4OSA_UInt32 nal_size;
-    M4OSA_UInt32 cnt;
-    M4OSA_UInt32 outbuffpos = 0;
-    //#ifndef DGR_FIX // + new
-    M4OSA_UInt32 nal_size_low16, nal_size_high16;
-    //#endif // + end new
-    M4OSA_UInt32 frame_size = 0;
-    M4OSA_UInt32 temp = 0;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt8 *buff;
-
-    p_bs = &bs;
-    instance = (NSWAVC_MCS_t *)ainstance;
-    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
-        "H264MCS_ProcessNALU: instance is M4OSA_NULL");
-
-    if( instance->is_done )
-        return err;
-
-    inbuff[0] = 0x00;
-    inbuff[1] = 0x00;
-    inbuff[2] = 0x00;
-    inbuff[3] = 0x01;
-
-
-    while( (M4OSA_Int32)frame_size < inbuf_size )
-    {
-        mask_bits = 0xFFFFFFFF;
-        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
-
-
-        nalu_info = (unsigned char)p_bs->Buffer[4];
-
-        outbuff[outbuffpos] = p_bs->Buffer[0];
-        outbuff[outbuffpos + 1] = p_bs->Buffer[1];
-        outbuff[outbuffpos + 2] = p_bs->Buffer[2];
-        outbuff[outbuffpos + 3] = p_bs->Buffer[3];
-        outbuff[outbuffpos + 4] = p_bs->Buffer[4];
-
-        p_bs->Buffer = p_bs->Buffer + 5;
-
-        p_bs->bitPos = 0;
-        p_bs->lastTotalBits = 0;
-        p_bs->numBitsInBuffer = ( inbuf_size - frame_size - 5) << 3;
-        p_bs->readableBytesInBuffer = inbuf_size - frame_size - 5;
-
-        p_bs->ui32TempBuff = 0;
-        p_bs->i8BitCnt = 0;
-        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
-        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
-
-        H264MCS_getBits(p_bs, 0);
-
-
-
-        nal_size = inbuf_size - frame_size - 4;
-        buff = inbuff + frame_size + 4;
-
-        while( nal_size > 4 )
-        {
-            if( ( buff[0] == 0x00) && (buff[1] == 0x00) && (buff[2] == 0x00)
-                && (buff[3] == 0x01) )
-            {
-                break;
-            }
-            buff = buff + 1;
-            nal_size = nal_size - 1;
-        }
-
-        if( nal_size <= 4 )
-        {
-            nal_size = 0;
-        }
-        nal_size = ( inbuf_size - frame_size - 4) - nal_size;
-
-        //      M4OSA_TRACE1_3("H264MCS_ProcessNALU frame  input buff size = %d  current position
-        //= %d   nal size = %d",
-        //  inbuf_size, frame_size,  nal_size + 4);
-        frame_size += nal_size + 4;
-
-
-
-        forbidden_bit = ( nalu_info >> 7) & 1;
-        nal_ref_idc = ( nalu_info >> 5) & 3;
-        nal_unit_type = (nalu_info) &0x1f;
-
-        if( nal_unit_type == 5 )
-        {
-            /*IDR/PPS Packet - Do nothing*/
-            instance->is_done = 1;
-            return err;
-        }
-
-        NSWAVCMCS_initBitstream(&instance->encbs);
-        instance->encbs.streamBuffer = outbuff + outbuffpos + 5;
-
-        if( nal_unit_type == 8 )
-        {
-            M4OSA_TRACE1_0("H264MCS_ProcessNALU() Error: PPS");
-            return err;
-        }
-
-        if( nal_unit_type == 7 )
-        {
-            /*SPS Packet */
-            M4OSA_TRACE1_0("H264MCS_ProcessNALU() Error: SPS");
-            return 0;
-        }
-
-        if( (nal_unit_type == 5) )
-        {
-            instance->frame_count = 0;
-            instance->POC_lsb = 0;
-        }
-
-        if( (nal_unit_type == 1) )
-        {
-            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            NSWAVCMCS_uExpVLC(&instance->encbs, first_mb_in_slice);
-
-            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            NSWAVCMCS_uExpVLC(&instance->encbs, slice_type);
-
-            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
-            NSWAVCMCS_uExpVLC(&instance->encbs, pic_parameter_set_id);
-
-            temp = H264MCS_getBits(p_bs,
-                instance->clip_sps.log2_max_frame_num_minus4 + 4);
-            NSWAVCMCS_putBits(&instance->encbs, instance->frame_count,
-                instance->clip_sps.log2_max_frame_num_minus4 + 4);
-
-            // In Baseline Profile: frame_mbs_only_flag should be ON
-
-            if( nal_unit_type == 5 )
-            {
-                temp = H264MCS_DecVLCReadExpGolombCode(p_bs);
-                NSWAVCMCS_uExpVLC(&instance->encbs, temp);
-            }
-
-            if( instance->clip_sps.pic_order_cnt_type == 0 )
-            {
-                temp = H264MCS_getBits(p_bs,
-                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4
-                    + 4);
-                NSWAVCMCS_putBits(&instance->encbs, instance->POC_lsb,
-                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
-            }
-
-            if( ( instance->clip_sps.pic_order_cnt_type == 1)
-                && (instance->clip_sps.delta_pic_order_always_zero_flag) )
-            {
-                temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
-                NSWAVCMCS_sExpVLC(&instance->encbs, temp);
-            }
-
-            cnt = p_bs->bitPos & 0x7;
-
-            if( cnt )
-            {
-                cnt = 8 - cnt;
-                temp = H264MCS_getBits(p_bs, cnt);
-                NSWAVCMCS_putBits(&instance->encbs, temp, cnt);
-            }
-
-            cnt = p_bs->bitPos >> 3;
-
-            while( cnt < (nal_size - 2) )
-            {
-                temp = H264MCS_getBits(p_bs, 8);
-                NSWAVCMCS_putBits(&instance->encbs, temp, 8);
-                cnt = p_bs->bitPos >> 3;
-            }
-
-            temp = H264MCS_getBits(p_bs, 8);
-
-            if( temp != 0 )
-            {
-                cnt = 0;
-
-                while( ( temp & 0x1) == 0 )
-                {
-                    cnt++;
-                    temp = temp >> 1;
-                }
-                cnt++;
-                temp = temp >> 1;
-
-                if( 8 - cnt )
-                {
-                    NSWAVCMCS_putBits(&instance->encbs, temp, (8 - cnt));
-                }
-
-                NSWAVCMCS_putRbspTbits(&instance->encbs);
-            }
-            else
-            {
-                if( instance->encbs.bitPos % 8 )
-                {
-                    NSWAVCMCS_putBits(&instance->encbs, 0,
-                        (8 - instance->encbs.bitPos % 8));
-                }
-            }
-
-            temp = instance->encbs.byteCnt;
-            temp = temp + 1;
-
-            outbuff[outbuffpos] = (M4OSA_UInt8)(( temp >> 24) & 0xFF);
-            outbuff[outbuffpos + 1] = (M4OSA_UInt8)(( temp >> 16) & 0xFF);
-            outbuff[outbuffpos + 2] = (M4OSA_UInt8)(( temp >> 8) & 0xFF);
-            outbuff[outbuffpos + 3] = (M4OSA_UInt8)((temp) &0xFF);
-            outbuffpos = outbuffpos + temp + 4;
-        }
-        else
-        {
-            p_bs->Buffer = p_bs->Buffer - 5;
-            memcpy((void *) &outbuff[outbuffpos],
-                (void *)p_bs->Buffer, nal_size + 4);
-
-            outbuff[outbuffpos] = (M4OSA_UInt8)((nal_size >> 24)& 0xFF);
-        outbuff[outbuffpos + 1] = (M4OSA_UInt8)((nal_size >> 16)& 0xFF);;
-        outbuff[outbuffpos + 2] = (M4OSA_UInt8)((nal_size >> 8)& 0xFF);;
-        outbuff[outbuffpos + 3] = (M4OSA_UInt8)((nal_size)& 0xFF);;
-
-            outbuffpos = outbuffpos + nal_size + 4;
-        }
-    }
-
-    *outbuf_size = outbuffpos;
-
-    instance->POC_lsb = instance->POC_lsb + 1;
-
-    if( instance->POC_lsb == instance->POC_lsb_mod )
-    {
-        instance->POC_lsb = 0;
-    }
-    instance->frame_count = instance->frame_count + 1;
-
-    if( instance->frame_count == instance->frame_mod_count )
-    {
-        instance->frame_count = 0;
-    }
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR   M4MCS_convetFromByteStreamtoNALStream(  M4OSA_UInt8 *inbuff,
-                                                    M4OSA_UInt32 inbuf_size )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 framesize = 0;
-    M4OSA_UInt32 nal_size =0;
-    M4OSA_UInt8 *buff;
-
-
-    while(framesize < inbuf_size)
-    {
-            nal_size = inbuf_size - framesize - 4;
-            buff =  inbuff + framesize + 4;
-
-            while(nal_size > 4){
-                if((buff[0] == 0x00) &&
-                (buff[1] == 0x00) &&
-                (buff[2] == 0x00) &&
-                (buff[3] == 0x01)){
-                    break;
-                }
-                buff = buff + 1;
-                nal_size = nal_size -1;
-            }
-
-            if(nal_size <= 4){
-                nal_size = 0;
-            }
-            nal_size = (inbuf_size - framesize - 4) - nal_size;
-
-        inbuff[framesize + 0]  = (M4OSA_UInt8)((nal_size >> 24)& 0xFF);
-        inbuff[framesize + 1]  = (M4OSA_UInt8)((nal_size >> 16)& 0xFF);
-        inbuff[framesize + 2]  = (M4OSA_UInt8)((nal_size >> 8)& 0xFF);
-        inbuff[framesize + 3]  = (M4OSA_UInt8)((nal_size )& 0xFF);
-        framesize += nal_size + 4;
-
-        M4OSA_TRACE1_2("M4MCS_convetFromByteStreamtoNALStream framesize = %x nalsize = %x",
-            framesize, nal_size)
-    }
-
-    return  err;
-}
-
-
-M4OSA_ERR H264MCS_Freeinstance( NSWAVC_MCS_t *instance )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
-        "H264MCS_Freeinstance: instance is M4OSA_NULL");
-
-    if( M4OSA_NULL != instance->encoder_pps.slice_group_id )
-    {
-        free(instance->encoder_pps.slice_group_id);
-    }
-
-    if( M4OSA_NULL != instance->p_encoder_sps )
-    {
-        free(instance->p_encoder_sps);
-        instance->p_encoder_sps = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != instance->p_encoder_pps )
-    {
-        free(instance->p_encoder_pps);
-        instance->p_encoder_pps = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != instance->m_pFinalDSI )
-    {
-        free(instance->m_pFinalDSI);
-        instance->m_pFinalDSI = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != instance )
-    {
-        free(instance);
-        instance = M4OSA_NULL;
-    }
-
-    return err;
-}
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
- * @brief    Get the MCS version.
- * @note Can be called anytime. Do not need any context.
- * @param    pVersionInfo        (OUT) Pointer to a version info structure
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_getVersion( M4_VersionInfo *pVersionInfo )
-{
-    M4OSA_TRACE3_1("M4MCS_getVersion called with pVersionInfo=0x%x",
-        pVersionInfo);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
-        "M4MCS_getVersion: pVersionInfo is M4OSA_NULL");
-
-    pVersionInfo->m_major = M4MCS_VERSION_MAJOR;
-    pVersionInfo->m_minor = M4MCS_VERSION_MINOR;
-    pVersionInfo->m_revision = M4MCS_VERSION_REVISION;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_getVersion(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * @brief    Initializes the MCS (allocates an execution context).
- * @note
- * @param    pContext            (OUT) Pointer on the MCS context to allocate
- * @param    pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
- * @param    pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
- * @return   M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-
-M4OSA_ERR M4MCS_init( M4MCS_Context *pContext,
-                     M4OSA_FileReadPointer *pFileReadPtrFct,
-                     M4OSA_FileWriterPointer *pFileWritePtrFct )
-{
-    M4MCS_InternalContext *pC = M4OSA_NULL;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE3_3(
-        "M4MCS_init called with pContext=0x%x, pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
-        pContext, pFileReadPtrFct, pFileWritePtrFct);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_init: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
-        "M4MCS_init: pFileReadPtrFct is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
-        "M4MCS_init: pFileWritePtrFct is M4OSA_NULL");
-
-    /**
-    * Allocate the MCS context and return it to the user */
-    pC = (M4MCS_InternalContext *)M4OSA_32bitAlignedMalloc(sizeof(M4MCS_InternalContext),
-        M4MCS, (M4OSA_Char *)"M4MCS_InternalContext");
-    *pContext = pC;
-
-    if( M4OSA_NULL == pC )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_init(): unable to allocate M4MCS_InternalContext, returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    /**
-    * Init the context. All pointers must be initialized to M4OSA_NULL
-    * because CleanUp() can be called just after Init(). */
-    pC->State = M4MCS_kState_CREATED;
-    pC->pOsaFileReadPtr = pFileReadPtrFct;
-    pC->pOsaFileWritPtr = pFileWritePtrFct;
-    pC->VideoState = M4MCS_kStreamState_NOSTREAM;
-    pC->AudioState = M4MCS_kStreamState_NOSTREAM;
-    pC->noaudio = M4OSA_FALSE;
-    pC->novideo = M4OSA_FALSE;
-    pC->uiProgress = 0;
-
-    /**
-    * Reader stuff */
-    pC->pInputFile = M4OSA_NULL;
-    pC->InputFileType = M4VIDEOEDITING_kFileType_Unsupported;
-    pC->bFileOpenedInFastMode = M4OSA_FALSE;
-    pC->pReaderContext = M4OSA_NULL;
-    pC->pReaderVideoStream = M4OSA_NULL;
-    pC->pReaderAudioStream = M4OSA_NULL;
-    pC->bUnsupportedVideoFound = M4OSA_FALSE;
-    pC->bUnsupportedAudioFound = M4OSA_FALSE;
-    pC->iAudioCtsOffset = 0;
-    /* First temporary video AU to have more precise end video cut*/
-    pC->ReaderVideoAU1.m_structSize = 0;
-    /* Second temporary video AU to have more precise end video cut*/
-    pC->ReaderVideoAU2.m_structSize = 0;
-    pC->ReaderAudioAU1.m_structSize = 0;
-    pC->ReaderAudioAU2.m_structSize = 0;
-    pC->m_audioAUDuration = 0;
-    pC->m_pDataAddress1 = M4OSA_NULL;
-    pC->m_pDataAddress2 = M4OSA_NULL;
-    /* First temporary video AU data to have more precise end video cut*/
-    pC->m_pDataVideoAddress1 = M4OSA_NULL;
-    /* Second temporary video AU data to have more precise end video cut*/
-    pC->m_pDataVideoAddress2 = M4OSA_NULL;
-
-    /**
-    * Video decoder stuff */
-    pC->pViDecCtxt = M4OSA_NULL;
-    pC->dViDecStartingCts = 0.0;
-    pC->iVideoBeginDecIncr = 0;
-    pC->dViDecCurrentCts = 0.0;
-    pC->dCtsIncrement = 0.0;
-    pC->isRenderDup = M4OSA_FALSE;
-
-    /**
-    * Video encoder stuff */
-    pC->pViEncCtxt = M4OSA_NULL;
-    pC->pPreResizeFrame = M4OSA_NULL;
-    pC->uiEncVideoBitrate = 0;
-    pC->encoderState = M4MCS_kNoEncoder;
-
-    /**
-    * Audio decoder stuff */
-    pC->pAudioDecCtxt = M4OSA_NULL;
-    pC->AudioDecBufferIn.m_dataAddress = M4OSA_NULL;
-    pC->AudioDecBufferIn.m_bufferSize = 0;
-    pC->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
-    pC->AudioDecBufferOut.m_bufferSize = 0;
-    pC->pPosInDecBufferOut = M4OSA_NULL;
-    /**
-    * Ssrc stuff */
-    pC->pSsrcBufferIn = M4OSA_NULL;
-    pC->pSsrcBufferOut = M4OSA_NULL;
-    pC->pPosInSsrcBufferIn = M4OSA_NULL;
-    pC->pPosInSsrcBufferOut = M4OSA_NULL;
-    pC->iSsrcNbSamplIn = 0;
-    pC->iSsrcNbSamplOut = 0;
-    pC->SsrcScratch = M4OSA_NULL;
-    pC->pLVAudioResampler = M4OSA_NULL;
-    /**
-    * Audio encoder */
-    pC->pAudioEncCtxt = M4OSA_NULL;
-    pC->pAudioEncDSI.infoSize = 0;
-    pC->pAudioEncDSI.pInfo = M4OSA_NULL;
-    pC->pAudioEncoderBuffer = M4OSA_NULL;
-    pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
-    pC->audioEncoderGranularity = 0;
-
-    /**
-    * Writer stuff */
-    pC->pOutputFile = M4OSA_NULL;
-    pC->pTemporaryFile = M4OSA_NULL;
-    pC->pWriterContext = M4OSA_NULL;
-    pC->uiVideoAUCount = 0;
-    pC->uiVideoMaxAuSize = 0;
-    pC->uiVideoMaxChunckSize = 0;
-    pC->uiAudioAUCount = 0;
-    pC->uiAudioMaxAuSize = 0;
-
-    pC->uiAudioCts = 0;
-    pC->b_isRawWriter = M4OSA_FALSE;
-    pC->pOutputPCMfile = M4OSA_NULL;
-
-    /* Encoding config */
-    pC->EncodingVideoFormat = M4ENCODER_kNULL; /**< No format set yet */
-    pC->EncodingWidth = 0;                     /**< No size set yet */
-    pC->EncodingHeight = 0;                    /**< No size set yet */
-    pC->EncodingVideoFramerate = 0;            /**< No framerate set yet */
-
-    pC->uiBeginCutTime = 0;                    /**< No begin cut */
-    pC->uiEndCutTime = 0;                      /**< No end cut */
-    pC->uiMaxFileSize = 0;                     /**< No limit */
-    pC->uiAudioBitrate =
-        M4VIDEOEDITING_kUndefinedBitrate; /**< No bitrate set yet */
-    pC->uiVideoBitrate =
-        M4VIDEOEDITING_kUndefinedBitrate; /**< No bitrate set yet */
-
-    pC->WriterVideoStream.streamType = M4SYS_kVideoUnknown;
-    pC->WriterVideoStreamInfo.Header.pBuf = M4OSA_NULL;
-    pC->WriterAudioStream.streamType = M4SYS_kAudioUnknown;
-
-    pC->outputVideoTimescale = 0;
-
-    /*FB 2008/10/20: add media rendering parameter and AIR context to keep media aspect ratio*/
-    pC->MediaRendering = M4MCS_kResizing;
-    pC->m_air_context = M4OSA_NULL;
-    /**/
-
-    /**
-    * FlB 2009.03.04: add audio Effects*/
-    pC->pEffects = M4OSA_NULL;
-    pC->nbEffects = 0;
-    pC->pActiveEffectNumber = -1;
-    /**/
-
-    /*
-    * Reset pointers for media and codecs interfaces */
-    err = M4MCS_clearInterfaceTables(pC);
-    M4ERR_CHECK_RETURN(err);
-
-    /*
-    *  Call the media and codecs subscription module */
-    err = M4MCS_subscribeMediaAndCodec(pC);
-    M4ERR_CHECK_RETURN(err);
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-    /**
-    * Initialize the Still picture part of MCS*/
-
-    err = M4MCS_stillPicInit(pC, pFileReadPtrFct, pFileWritePtrFct);
-    M4ERR_CHECK_RETURN(err);
-
-    pC->m_bIsStillPicture = M4OSA_FALSE;
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    pC->m_pInstance = M4OSA_NULL;
-    pC->H264MCSTempBuffer = M4OSA_NULL;
-    pC->H264MCSTempBufferSize = 0;
-    pC->H264MCSTempBufferDataSize = 0;
-    pC->bH264Trim = M4OSA_FALSE;
-
-    /* Flag to get the last decoded frame cts */
-    pC->bLastDecodedFrameCTS = M4OSA_FALSE;
-
-    if( pC->m_pInstance == M4OSA_NULL )
-    {
-        err = H264MCS_Getinstance(&pC->m_pInstance);
-    }
-    pC->bExtOMXAudDecoder = M4OSA_FALSE;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_init(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn,
- *                         M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
- * @brief   Set the MCS input and output files.
- * @note    It opens the input file, but the output file is not created yet.
- * @param   pContext            (IN) MCS context
- * @param   pFileIn             (IN) Input file to transcode (The type of this parameter
- *                                 (URL, pipe...) depends on the OSAL implementation).
- * @param   mediaType           (IN) Container type (.3gp,.amr,mp3 ...) of input file.
- * @param   pFileOut            (IN) Output file to create  (The type of this parameter
- *                                    (URL, pipe...) depends on the OSAL implementation).
- * @param   pTempFile           (IN) Temporary file for the constant memory writer to
- *                                     store metadata ("moov.bin").
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4ERR_ALLOC:        There is no more available memory
- * @return  M4ERR_FILE_NOT_FOUND:   The input file has not been found
- * @return  M4MCS_ERR_INVALID_INPUT_FILE:   The input file is not a valid file, or is corrupted
- * @return  M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM:  The input file contains no
- *                                supported audio or video stream
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_open( M4MCS_Context pContext, M4OSA_Void *pFileIn,
-                     M4VIDEOEDITING_FileType InputFileType, M4OSA_Void *pFileOut,
-                     M4OSA_Void *pTempFile )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4OSA_ERR err;
-
-    M4READER_MediaFamily mediaFamily;
-    M4_StreamHandler *pStreamHandler;
-
-    M4OSA_TRACE2_3(
-        "M4MCS_open called with pContext=0x%x, pFileIn=0x%x, pFileOut=0x%x",
-        pContext, pFileIn, pFileOut);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_open: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileIn), M4ERR_PARAMETER,
-        "M4MCS_open: pFileIn is M4OSA_NULL");
-
-    if( ( InputFileType == M4VIDEOEDITING_kFileType_JPG)
-        || (InputFileType == M4VIDEOEDITING_kFileType_PNG)
-        || (InputFileType == M4VIDEOEDITING_kFileType_GIF)
-        || (InputFileType == M4VIDEOEDITING_kFileType_BMP) )
-    {
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-        /**
-        * Indicate that we must use the still picture functions*/
-
-        pC->m_bIsStillPicture = M4OSA_TRUE;
-
-        /**
-        * Call the still picture MCS functions*/
-        return M4MCS_stillPicOpen(pC, pFileIn, InputFileType, pFileOut);
-
-#else
-
-        M4OSA_TRACE1_0(
-            "M4MCS_open: Still picture is not supported with this version of MCS");
-        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    }
-
-    /**
-    * Check state automaton */
-    if( M4MCS_kState_CREATED != pC->State )
-    {
-        M4OSA_TRACE1_1("M4MCS_open(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Copy function input parameters into our context */
-    pC->pInputFile = pFileIn;
-    pC->InputFileType = InputFileType;
-    pC->pOutputFile = pFileOut;
-    pC->pTemporaryFile = pTempFile;
-    pC->uiProgress = 0;
-
-    /***********************************/
-    /* Open input file with the reader */
-    /***********************************/
-
-    err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Reset reader related variables */
-    pC->VideoState = M4MCS_kStreamState_NOSTREAM;
-    pC->AudioState = M4MCS_kStreamState_NOSTREAM;
-    pC->pReaderVideoStream = M4OSA_NULL;
-    pC->pReaderAudioStream = M4OSA_NULL;
-
-    /*******************************************************/
-    /* Initializes the reader shell and open the data file */
-    /*******************************************************/
-    err = pC->m_pReader->m_pFctCreate(&pC->pReaderContext);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctCreate returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Link the reader interface to the reader context */
-    pC->m_pReaderDataIt->m_readerContext = pC->pReaderContext;
-
-    /**
-    * Set the reader shell file access functions */
-    err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
-        M4READER_kOptionID_SetOsaFileReaderFctsPtr,
-        (M4OSA_DataOption)pC->pOsaFileReadPtr);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctSetOption returns 0x%x",
-            err);
-        return err;
-    }
-
-#ifdef M4MCS_WITH_FAST_OPEN
-
-    if( M4OSA_FALSE == pC->bFileOpenedInFastMode )
-    {
-        M4OSA_Bool trueValue = M4OSA_TRUE;
-
-        /* For first call use fast open mode */
-        err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
-            M4READER_3GP_kOptionID_FastOpenMode, &trueValue);
-
-        if( M4NO_ERROR == err )
-        {
-            pC->bFileOpenedInFastMode = M4OSA_TRUE;
-        }
-        else
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_open(): M4READER_3GP_kOptionID_FastOpenMode returns 0x%x",
-                err);
-
-            if( ( ( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID) == err)
-                || (( (M4OSA_UInt32)M4ERR_PARAMETER) == err) )
-            {
-                /* Not fatal, some readers may not support fast open mode */
-                pC->bFileOpenedInFastMode = M4OSA_FALSE;
-            }
-            else
-                return err;
-        }
-    }
-    else
-    {
-        M4OSA_Bool falseValue = M4OSA_FALSE;
-
-        /* For second call use normal open mode */
-        err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
-            M4READER_3GP_kOptionID_FastOpenMode, &falseValue);
-    }
-
-#endif /* M4MCS_WITH_FAST_OPEN */
-
-    /**
-    * Open the input file */
-
-    err = pC->m_pReader->m_pFctOpen(pC->pReaderContext, pC->pInputFile);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_UInt32 uiDummy, uiCoreId;
-        M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctOpen returns 0x%x", err);
-
-        /**
-        * If the error is from the core reader, we change it to a public VXS error */
-        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
-
-        if( M4MP4_READER == uiCoreId )
-        {
-            M4OSA_TRACE1_0(
-                "M4MCS_open(): returning M4MCS_ERR_INVALID_INPUT_FILE");
-            return M4MCS_ERR_INVALID_INPUT_FILE;
-        }
-        return err;
-    }
-
-    /**
-    * Get the streams from the input file */
-    while( M4NO_ERROR == err )
-    {
-        err =
-            pC->m_pReader->m_pFctGetNextStream( pC->pReaderContext,
-                                                &mediaFamily,
-                                                &pStreamHandler);
-
-        /**
-        * In case we found a BIFS stream or something else...*/
-        if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
-            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
-        {
-            err = M4NO_ERROR;
-            continue;
-        }
-
-        if( M4NO_ERROR == err ) /**< One stream found */
-        {
-            /**
-            * Found the first video stream */
-            if( ( M4READER_kMediaFamilyVideo == mediaFamily)
-                && (M4OSA_NULL == pC->pReaderVideoStream) )
-            {
-                if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeVideoMpeg4
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeVideoMpeg4Avc
-                    == pStreamHandler->m_streamType) )
-                {
-                    M4OSA_TRACE3_0(
-                        "M4MCS_open(): Found a H263 or MPEG-4 video stream in input 3gpp clip");
-
-                    /**
-                    * Keep pointer to the video stream */
-                    pC->pReaderVideoStream =
-                        (M4_VideoStreamHandler *)pStreamHandler;
-                    pC->bUnsupportedVideoFound = M4OSA_FALSE;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-
-                    /**
-                    * Init our video stream state variable */
-                    pC->VideoState = M4MCS_kStreamState_STARTED;
-
-                    /**
-                    * Reset the stream reader */
-                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
-                        (M4_StreamHandler *)pC->pReaderVideoStream);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4MCS_open():\
-                            m_pReader->m_pFctReset(video) returns 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Initializes an access Unit */
-                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-                        pStreamHandler, &pC->ReaderVideoAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4MCS_open():\
-                            m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-                else /**< Not H263 or MPEG-4 (H264, etc.) */
-                {
-                    M4OSA_TRACE1_1("M4MCS_open(): Found an unsupported video stream (0x%x) in\
-                                   input 3gpp clip",
-                                   pStreamHandler->m_streamType);
-
-                    pC->bUnsupportedVideoFound = M4OSA_TRUE;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-                /* +CRLV6775 -H.264 Trimming */
-                if( M4DA_StreamTypeVideoMpeg4Avc
-                    == pStreamHandler->m_streamType )
-                {
-
-                    // SPS and PPS are storead as per the 3gp file format
-                    pC->m_pInstance->m_pDecoderSpecificInfo =
-                        pStreamHandler->m_pH264DecoderSpecificInfo;
-                    pC->m_pInstance->m_decoderSpecificInfoSize =
-                        pStreamHandler->m_H264decoderSpecificInfoSize;
-                }
-                /* -CRLV6775 -H.264 Trimming */
-            }
-            /**
-            * Found the first audio stream */
-            else if( ( M4READER_kMediaFamilyAudio == mediaFamily)
-                && (M4OSA_NULL == pC->pReaderAudioStream) )
-            {
-                if( ( M4DA_StreamTypeAudioAmrNarrowBand
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioMp3
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioEvrc
-                    == pStreamHandler->m_streamType) )
-                {
-                    M4OSA_TRACE3_0(
-                        "M4MCS_open(): Found an AMR-NB, AAC or MP3 audio stream in input clip");
-
-                    /**
-                    * Keep pointer to the audio stream */
-                    pC->pReaderAudioStream =
-                        (M4_AudioStreamHandler *)pStreamHandler;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-                    pC->bUnsupportedAudioFound = M4OSA_FALSE;
-
-                    /**
-                    * Init our audio stream state variable */
-                    pC->AudioState = M4MCS_kStreamState_STARTED;
-
-                    /**
-                    * Reset the stream reader */
-                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
-                        (M4_StreamHandler *)pC->pReaderAudioStream);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4MCS_open():\
-                            m_pReader->m_pFctReset(audio) returns 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Initializes an access Unit */
-                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-                        pStreamHandler, &pC->ReaderAudioAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4MCS_open():\
-                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Output max AU size is equal to input max AU size (this value
-                    * will be changed if there is audio transcoding) */
-                    pC->uiAudioMaxAuSize = pStreamHandler->m_maxAUSize;
-                }
-                else
-                {
-                    /**< Not AMR-NB, AAC, MP3 nor EVRC (AMR-WB, WAV...) */
-                    M4OSA_TRACE1_1("M4MCS_open(): Found an unsupported audio stream (0x%x) in \
-                                   input 3gpp clip", pStreamHandler->m_streamType);
-
-                    pC->bUnsupportedAudioFound = M4OSA_TRUE;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-            }
-        }
-    } /**< end of while (M4NO_ERROR == err) */
-
-    /**
-    * Check we found at least one supported stream */
-    if( ( M4OSA_NULL == pC->pReaderVideoStream)
-        && (M4OSA_NULL == pC->pReaderAudioStream) )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_open(): returning M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM");
-        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
-    }
-
-    if( pC->VideoState == M4MCS_kStreamState_STARTED )
-    {
-        err = M4MCS_setCurrentVideoDecoder(pContext,
-            pC->pReaderVideoStream->m_basicProperties.m_streamType);
-        /*FB 2009-02-09: the error is check and returned only if video codecs are compiled,
-        else only audio is used, that is why the editing process can continue*/
-#ifndef M4MCS_AUDIOONLY
-
-        M4ERR_CHECK_RETURN(err);
-
-#else
-
-        if( ( M4NO_ERROR != err) && (M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED != err) )
-        {
-            M4ERR_CHECK_RETURN(err);
-        }
-
-#endif /*M4MCS_AUDIOONLY*/
-
-    }
-
-    if( pC->AudioState == M4MCS_kStreamState_STARTED )
-    {
-        //EVRC
-        if( M4DA_StreamTypeAudioEvrc
-            != pStreamHandler->
-            m_streamType ) /* decoder not supported yet, but allow to do null encoding */
-        {
-            err = M4MCS_setCurrentAudioDecoder(pContext,
-                pC->pReaderAudioStream->m_basicProperties.m_streamType);
-            M4ERR_CHECK_RETURN(err);
-        }
-    }
-
-    /**
-    * Get the audio and video stream properties */
-    err = M4MCS_intGetInputClipProperties(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_open(): M4MCS_intGetInputClipProperties returns 0x%x", err);
-        return err;
-    }
-
-    /**
-    * Set the begin cut decoding increment according to the input frame rate */
-    if( 0. != pC->InputFileProperties.fAverageFrameRate ) /**< sanity check */
-    {
-        pC->iVideoBeginDecIncr = (M4OSA_Int32)(3000.
-            / pC->InputFileProperties.
-            fAverageFrameRate); /**< about 3 frames */
-    }
-    else
-    {
-        pC->iVideoBeginDecIncr =
-            200; /**< default value: 200 milliseconds (3 frames @ 15fps)*/
-    }
-
-    /**
-    * Update state automaton */
-    pC->State = M4MCS_kState_OPENED;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_open(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
- * @brief   Perform one step of trancoding.
- * @note
- * @param   pContext            (IN) MCS context
- * @param   pProgress           (OUT) Progress percentage (0 to 100) of the transcoding
- * @note    pProgress must be a valid address.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    One of the parameters is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_WAR_TRANSCODING_DONE: Transcoding is over, user should now call M4MCS_close()
- * @return  M4MCS_ERR_AUDIO_CONVERSION_FAILED: The audio conversion (AAC to AMR-NB or MP3) failed
- * @return  M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY: The input file contains an AAC audio track
- *                                 with an invalid sampling frequency (should never happen)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_step( M4MCS_Context pContext, M4OSA_UInt8 *pProgress )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-
-    M4OSA_TRACE3_1("M4MCS_step called with pContext=0x%x", pContext);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_step: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
-        "M4MCS_step: pProgress is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the still picture MCS functions*/
-        return M4MCS_stillPicStep(pC, pProgress);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    switch( pC->State )
-    {
-        case M4MCS_kState_READY:
-            *pProgress = 0;
-            return M4MCS_intStepSet(pC);
-            break;
-
-        case M4MCS_kState_BEGINVIDEOJUMP:
-            *pProgress = pC->uiProgress;
-            return M4MCS_intStepBeginVideoJump(pC);
-            break;
-
-        case M4MCS_kState_BEGINVIDEODECODE:
-            *pProgress = pC->uiProgress;
-            return M4MCS_intStepBeginVideoDecode(pC);
-            break;
-
-        case M4MCS_kState_PROCESSING:
-            {
-                M4OSA_ERR err = M4NO_ERROR;
-                err = M4MCS_intStepEncoding(pC, pProgress);
-                /* Save progress info in case of pause */
-                pC->uiProgress = *pProgress;
-                return err;
-            }
-            break;
-
-        default: /**< State error */
-            M4OSA_TRACE1_1(
-                "M4MCS_step(): Wrong State (%d), returning M4ERR_STATE",
-                pC->State);
-            return M4ERR_STATE;
-    }
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
- * @brief   Pause the transcoding i.e. release the (external hardware) video decoder.
- * @note    This function is not needed if no hardware accelerators are used.
- *          In that case, pausing the MCS is simply achieved by temporarily suspending
- *          the M4MCS_step function calls.
- * @param   pContext            (IN) MCS context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_pause( M4MCS_Context pContext )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_1("M4MCS_pause called with pContext=0x%x", pContext);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_pause: pContext is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicPause(pC);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    switch( pC->State )
-    {
-        case M4MCS_kState_BEGINVIDEOJUMP: /**< the video decoder has been created,
-                                            we must destroy it */
-        case M4MCS_kState_BEGINVIDEODECODE: /**< the video is being used, we must destroy it */
-        case M4MCS_kState_PROCESSING: /**< the video is being used, we must destroy it */
-                    /**< OK, nothing to do here */
-            break;
-
-        default: /**< State error */
-            M4OSA_TRACE1_1(
-                "M4MCS_pause(): Wrong State (%d), returning M4ERR_STATE",
-                pC->State);
-            return M4ERR_STATE;
-    }
-
-    /**
-    * Set the CTS at which we will resume the decoding */
-    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
-    {
-        /**
-        * We passed the starting CTS, so the resume target is the current CTS */
-        pC->dViDecStartingCts = pC->dViDecCurrentCts;
-    }
-    else {
-        /**
-        * We haven't passed the starting CTS yet, so the resume target is still the starting CTS
-        * --> nothing to do in the else block */
-    }
-
-    /**
-    * Free video decoder stuff */
-    if( M4OSA_NULL != pC->pViDecCtxt )
-    {
-        err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
-        pC->pViDecCtxt = M4OSA_NULL;
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_pause: m_pVideoDecoder->pFctDestroy returns 0x%x", err);
-            return err;
-        }
-    }
-
-    /**
-    * State transition */
-    pC->State = M4MCS_kState_PAUSED;
-
-    M4OSA_TRACE3_0("M4MCS_pause(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
- * @brief   Resume the transcoding after a pause (see M4MCS_pause).
- * @note    This function is not needed if no hardware accelerators are used.
- *          In that case, resuming the MCS is simply achieved by calling
- *          the M4MCS_step function.
- * @param   pContext            (IN) MCS context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_resume( M4MCS_Context pContext )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_1("M4MCS_resume called with pContext=0x%x", pContext);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_resume: pContext is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicResume(pC);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    switch( pC->State )
-    {
-        case M4MCS_kState_PAUSED: /**< OK, nothing to do here */
-            break;
-
-        default:                  /**< State error */
-            M4OSA_TRACE1_1(
-                "M4MCS_resume(): Wrong State (%d), returning M4ERR_STATE",
-                pC->State);
-            return M4ERR_STATE;
-            break;
-    }
-
-    /**
-    * Prepare the video decoder */
-    err = M4MCS_intPrepareVideoDecoder(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_resume(): M4MCS_intPrepareVideoDecoder() returns 0x%x", err);
-        return err;
-    }
-
-    /**
-    * State transition */
-    if( 0.0 == pC->dViDecStartingCts )
-    {
-        /**
-        * We are still at the beginning of the decoded stream, no need to jump, we can proceed */
-        pC->State = M4MCS_kState_PROCESSING;
-    }
-    else
-    {
-        /**
-        * Jumping */
-        pC->State = M4MCS_kState_BEGINVIDEOJUMP;
-    }
-
-    M4OSA_TRACE3_0("M4MCS_resume(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
- * @brief    Finish the MCS transcoding.
- * @note The output 3GPP file is ready to be played after this call
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_close( M4MCS_Context pContext )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4ENCODER_Header *encHeader;
-    M4SYS_StreamIDmemAddr streamHeader;
-
-    M4OSA_ERR err = M4NO_ERROR, err2;
-
-    M4OSA_TRACE2_1("M4MCS_close called with pContext=0x%x", pContext);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_close: pContext is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Indicate that current file is no longer a still picture*/
-        pC->m_bIsStillPicture = M4OSA_FALSE;
-
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicClose(pC);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    if( M4MCS_kState_FINISHED != pC->State )
-    {
-        M4OSA_TRACE1_1("M4MCS_close(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Close the encoder before the writer to be certain all the AUs have been written and we can
-    get the DSI. */
-
-    /* Has the encoder actually been started? Don't stop it if that's not the case. */
-    if( M4MCS_kEncoderRunning == pC->encoderState )
-    {
-        if( pC->pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
-        {
-            err = pC->pVideoEncoderGlobalFcts->pFctStop(pC->pViEncCtxt);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_close: pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
-                    err);
-                /* Well... how the heck do you handle a failed cleanup? */
-            }
-        }
-
-        pC->encoderState = M4MCS_kEncoderStopped;
-    }
-
-    /* Has the encoder actually been opened? Don't close it if that's not the case. */
-    if( M4MCS_kEncoderStopped == pC->encoderState )
-    {
-        err = pC->pVideoEncoderGlobalFcts->pFctClose(pC->pViEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_close: pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
-                err);
-            /* Well... how the heck do you handle a failed cleanup? */
-        }
-
-        pC->encoderState = M4MCS_kEncoderClosed;
-    }
-
-    /**********************************/
-    /******** Close the writer ********/
-    /**********************************/
-    if( M4OSA_NULL != pC->pWriterContext ) /* happens in state _SET */
-    {
-        /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
-        closing it. */
-
-        if( pC->novideo != M4OSA_TRUE )
-        {
-            if( ( M4ENCODER_kMPEG4 == pC->EncodingVideoFormat)
-                || (M4ENCODER_kH264 == pC->EncodingVideoFormat) )
-            {
-                err = pC->pVideoEncoderGlobalFcts->pFctGetOption(pC->pViEncCtxt,
-                    M4ENCODER_kOptionID_EncoderHeader,
-                    (M4OSA_DataOption) &encHeader);
-
-                if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4MCS_close: failed to get the encoder header (err 0x%x)",
-                        err);
-                    /**< no return here, we still have stuff to deallocate after close, even
-                     if it fails. */
-                }
-                else
-                {
-                    /* set this header in the writer */
-                    streamHeader.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
-                    streamHeader.size = encHeader->Size;
-                    streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
-                }
-
-                M4OSA_TRACE1_0("calling set option");
-                err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-                    M4WRITER_kDSI, &streamHeader);
-                M4OSA_TRACE1_0("set option done");
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4MCS_close: failed to set the DSI in the writer (err 0x%x)",
-                        err);
-                }
-            }
-
-            if( ( M4OSA_TRUE == pC->bH264Trim)
-                && (M4ENCODER_kNULL == pC->EncodingVideoFormat) )
-            {
-                if(pC->uiBeginCutTime == 0)
-                {
-                    M4OSA_TRACE1_1("Decoder specific info size = %d",
-                        pC->m_pInstance->m_decoderSpecificInfoSize);
-                    pC->m_pInstance->m_pFinalDSISize =
-                        pC->m_pInstance->m_decoderSpecificInfoSize;
-                    M4OSA_TRACE1_1("Decoder specific info pointer = %d",
-                        (M4OSA_MemAddr8)pC->m_pInstance->m_pDecoderSpecificInfo);
-
-                    pC->m_pInstance->m_pFinalDSI =
-                        (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(pC->m_pInstance-> \
-                        m_decoderSpecificInfoSize, M4MCS,
-                        (M4OSA_Char *)"instance->m_pFinalDSI");
-
-                    if( pC->m_pInstance->m_pFinalDSI == M4OSA_NULL )
-                    {
-                        M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
-                        return M4ERR_ALLOC;
-                    }
-                    memcpy((void *)pC->m_pInstance->m_pFinalDSI,
-                        (void *)pC-> \
-                        m_pInstance->m_pDecoderSpecificInfo,
-                        pC->m_pInstance->m_decoderSpecificInfoSize);
-                }
-                streamHeader.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
-                streamHeader.size = pC->m_pInstance->m_pFinalDSISize;
-                streamHeader.addr =
-                    (M4OSA_MemAddr32)pC->m_pInstance->m_pFinalDSI;
-                M4OSA_TRACE1_0("calling set option");
-                err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-                    M4WRITER_kDSI, &streamHeader);
-                M4OSA_TRACE1_0("set option done");
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4MCS_close: failed to set the DSI in the writer (err 0x%x)",
-                        err);
-                }
-            }
-        }
-        /* Write and close the 3GP output file */
-        err2 = pC->pWriterGlobalFcts->pFctCloseWrite(pC->pWriterContext);
-        pC->pWriterContext = M4OSA_NULL;
-
-        if( M4NO_ERROR != err2 )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_close: pWriterGlobalFcts->pFctCloseWrite returns 0x%x",
-                err2);
-
-            if( M4NO_ERROR == err )
-                err = err2;
-            /**< no return here, we still have stuff to deallocate after close, even if it fails.*/
-        }
-    }
-
-    /* Close output PCM file if needed */
-    if( pC->pOutputPCMfile != M4OSA_NULL )
-    {
-        pC->pOsaFileWritPtr->closeWrite(pC->pOutputPCMfile);
-        pC->pOutputPCMfile = M4OSA_NULL;
-    }
-
-    /*FlB 2009.03.04: add audio effects,
-    free effects list*/
-    if( M4OSA_NULL != pC->pEffects )
-    {
-        free(pC->pEffects);
-        pC->pEffects = M4OSA_NULL;
-    }
-    pC->nbEffects = 0;
-    pC->pActiveEffectNumber = -1;
-
-    /**
-    * State transition */
-    pC->State = M4MCS_kState_CLOSED;
-
-    if( M4OSA_NULL != pC->H264MCSTempBuffer )
-    {
-        free(pC->H264MCSTempBuffer);
-    }
-
-    M4OSA_TRACE3_0("M4MCS_close(): returning M4NO_ERROR");
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
- * @brief    Free all resources used by the MCS.
- * @note The context is no more valid after this call
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_cleanUp( M4MCS_Context pContext )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-
-    M4OSA_TRACE3_1("M4MCS_cleanUp called with pContext=0x%x", pContext);
-
-#ifdef MCS_DUMP_PCM_TO_FILE
-
-    if( file_au_reader )
-    {
-        fclose(file_au_reader);
-        file_au_reader = NULL;
-    }
-
-    if( file_pcm_decoder )
-    {
-        fclose(file_pcm_decoder);
-        file_pcm_decoder = NULL;
-    }
-
-    if( file_pcm_encoder )
-    {
-        fclose(file_pcm_encoder);
-        file_pcm_encoder = NULL;
-    }
-
-#endif
-
-    /**
-    * Check input parameter */
-
-    if( M4OSA_NULL == pContext )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_cleanUp: pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
-        return M4ERR_PARAMETER;
-    }
-
-    /**
-    * Check state automaton */
-    if( M4MCS_kState_CLOSED != pC->State )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_cleanUp(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    if( M4OSA_NULL != pC->m_pInstance )
-    {
-        err = H264MCS_Freeinstance(pC->m_pInstance);
-        pC->m_pInstance = M4OSA_NULL;
-    }
-
-    /* ----- Free video encoder stuff, if needed ----- */
-
-    if( ( M4OSA_NULL != pC->pViEncCtxt)
-        && (M4OSA_NULL != pC->pVideoEncoderGlobalFcts) )
-    {
-        err = pC->pVideoEncoderGlobalFcts->pFctCleanup(pC->pViEncCtxt);
-        pC->pViEncCtxt = M4OSA_NULL;
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_cleanUp: pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        pC->encoderState = M4MCS_kNoEncoder;
-    }
-
-    /**
-    * In the H263 case, we allocated our own DSI buffer */
-    if( ( M4ENCODER_kH263 == pC->EncodingVideoFormat)
-        && (M4OSA_NULL != pC->WriterVideoStreamInfo.Header.pBuf) )
-    {
-        free(pC->WriterVideoStreamInfo.Header.pBuf);
-        pC->WriterVideoStreamInfo.Header.pBuf = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pPreResizeFrame )
-    {
-        if( M4OSA_NULL != pC->pPreResizeFrame[0].pac_data )
-        {
-            free(pC->pPreResizeFrame[0].pac_data);
-            pC->pPreResizeFrame[0].pac_data = M4OSA_NULL;
-        }
-
-        if( M4OSA_NULL != pC->pPreResizeFrame[1].pac_data )
-        {
-            free(pC->pPreResizeFrame[1].pac_data);
-            pC->pPreResizeFrame[1].pac_data = M4OSA_NULL;
-        }
-
-        if( M4OSA_NULL != pC->pPreResizeFrame[2].pac_data )
-        {
-            free(pC->pPreResizeFrame[2].pac_data);
-            pC->pPreResizeFrame[2].pac_data = M4OSA_NULL;
-        }
-        free(pC->pPreResizeFrame);
-        pC->pPreResizeFrame = M4OSA_NULL;
-    }
-
-    /* ----- Free the ssrc stuff ----- */
-
-    if( M4OSA_NULL != pC->SsrcScratch )
-    {
-        free(pC->SsrcScratch);
-        pC->SsrcScratch = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pSsrcBufferIn )
-    {
-        free(pC->pSsrcBufferIn);
-        pC->pSsrcBufferIn = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pSsrcBufferOut )
-    {
-        free(pC->pSsrcBufferOut);
-        pC->pSsrcBufferOut = M4OSA_NULL;
-    }
-
-    if (pC->pLVAudioResampler != M4OSA_NULL)
-    {
-        LVDestroy(pC->pLVAudioResampler);
-        pC->pLVAudioResampler = M4OSA_NULL;
-    }
-
-    /* ----- Free the audio encoder stuff ----- */
-
-    if( M4OSA_NULL != pC->pAudioEncCtxt )
-    {
-        err = pC->pAudioEncoderGlobalFcts->pFctClose(pC->pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_cleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        err = pC->pAudioEncoderGlobalFcts->pFctCleanUp(pC->pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_cleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        pC->pAudioEncCtxt = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pAudioEncoderBuffer )
-    {
-        free(pC->pAudioEncoderBuffer);
-        pC->pAudioEncoderBuffer = M4OSA_NULL;
-    }
-
-    /* ----- Free all other stuff ----- */
-
-    /**
-    * Free the readers and the decoders */
-    M4MCS_intCleanUp_ReadersDecoders(pC);
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-    /**
-    * Free the still picture resources */
-
-    M4MCS_stillPicCleanUp(pC);
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Free the shells interfaces */
-
-    M4MCS_unRegisterAllWriters(pContext);
-    M4MCS_unRegisterAllEncoders(pContext);
-    M4MCS_unRegisterAllReaders(pContext);
-    M4MCS_unRegisterAllDecoders(pContext);
-
-    /**
-    * Free the context itself */
-    free(pC);
-    pC = M4OSA_NULL;
-
-    M4OSA_TRACE3_0("M4MCS_cleanUp(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
- * @brief    Finish the MCS transcoding and free all resources used by the MCS
- *          whatever the state is.
- * @note    The context is no more valid after this call
- * @param    pContext            (IN) MCS context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_abort( M4MCS_Context pContext )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_ERR err1 = M4NO_ERROR;
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-
-    if( M4OSA_NULL == pContext )
-    {
-        return M4NO_ERROR;
-    }
-
-    if( ( pC->State == M4MCS_kState_CREATED)
-        || (pC->State == M4MCS_kState_CLOSED) )
-    {
-        pC->State = M4MCS_kState_CLOSED;
-
-        err = M4MCS_cleanUp(pContext);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_cleanUp fails err = 0x%x", err);
-        }
-    }
-    else
-    {
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-        if( pC->m_bIsStillPicture )
-        {
-            /**
-            * Cancel the ongoing processes if any*/
-            err = M4MCS_stillPicCancel(pC);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_abort : M4MCS_stillPicCancel fails err = 0x%x", err);
-            }
-            /*Still picture process is now stopped; Carry on with close and cleanup*/
-        }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-        pC->State = M4MCS_kState_FINISHED;
-
-        err = M4MCS_close(pContext);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_close fails err = 0x%x", err);
-            err1 = err;
-        }
-
-        err = M4MCS_cleanUp(pContext);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_cleanUp fails err = 0x%x", err);
-        }
-    }
-    err = (err1 == M4NO_ERROR) ? err : err1;
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
- *                                         M4VIDEOEDITING_ClipProperties* pFileProperties);
- * @brief   Retrieves the properties of the audio and video streams from the input file.
- * @param   pContext            (IN) MCS context
- * @param   pProperties         (OUT) Pointer on an allocated M4VIDEOEDITING_ClipProperties
-structure which is filled with the input stream properties.
- * @note    The structure pProperties must be allocated and further de-allocated
-by the application. The function must be called in the opened state.
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_getInputFileProperties( M4MCS_Context pContext,
-                                       M4VIDEOEDITING_ClipProperties *pFileProperties )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-
-    M4OSA_TRACE2_2("M4MCS_getInputFileProperties called with pContext=0x%x, \
-                   pFileProperties=0x%x", pContext, pFileProperties);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_getInputFileProperties: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileProperties), M4ERR_PARAMETER,
-        "M4MCS_getInputFileProperties: pProperties is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicGetInputFileProperties(pC, pFileProperties);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    if( M4MCS_kState_OPENED != pC->State )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_getInputFileProperties(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /**
-    * Copy previously computed properties into given structure */
-    memcpy((void *)pFileProperties,
-        (void *) &pC->InputFileProperties,
-        sizeof(M4VIDEOEDITING_ClipProperties));
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
- * @brief   Set the MCS video output parameters.
- * @note    Must be called after M4MCS_open. Must be called before M4MCS_step.
- * @param   pContext            (IN) MCS context
- * @param   pParams             (IN/OUT) Transcoding parameters
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 : Output video frame size parameter is
- *                                                        incompatible with H263 encoding
- * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 : Output video frame size parameter is
- *                                                        incompatible with H263 encoding
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT     : Undefined output video format parameter
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE : Undefined output video frame size
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE : Undefined output video frame rate
- * @return  M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT : Undefined output audio format parameter
- * @return  M4MCS_ERR_DURATION_IS_NULL : Specified output parameters define a null duration stream
- *                                         (no audio and video)
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_setOutputParams( M4MCS_Context pContext,
-                                M4MCS_OutputParams *pParams )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4OSA_UInt32 uiFrameWidth;
-    M4OSA_UInt32 uiFrameHeight;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_2(
-        "M4MCS_setOutputParams called with pContext=0x%x, pParams=0x%x",
-        pContext, pParams);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_setOutputParams: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams), M4ERR_PARAMETER,
-        "M4MCS_setOutputParams: pParam is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicSetOutputParams(pC, pParams);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    if( M4MCS_kState_OPENED != pC->State )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_setOutputParams(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Ignore audio or video stream if the output do not need it, */
-    /* or if the input file does not have any audio or video stream */
-    /*FlB 26.02.2009: add mp3 as mcs output format*/
-    if( ( pParams->OutputVideoFormat == M4VIDEOEDITING_kNoneVideo)
-        || (pC->VideoState == M4MCS_kStreamState_NOSTREAM)
-        || (pParams->OutputFileType == M4VIDEOEDITING_kFileType_AMR)
-        || (pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP3) )
-    {
-        pC->novideo = M4OSA_TRUE;
-    }
-
-    if( ( pParams->OutputAudioFormat == M4VIDEOEDITING_kNoneAudio)
-        || (pC->AudioState == M4MCS_kStreamState_NOSTREAM) )
-    {
-        pC->noaudio = M4OSA_TRUE;
-    }
-
-    if( pC->noaudio && pC->novideo )
-    {
-        M4OSA_TRACE1_0(
-            "!!! M4MCS_setOutputParams : clip is NULL, there is no audio, no video");
-        return M4MCS_ERR_DURATION_IS_NULL;
-    }
-
-    /* Set writer */
-    err = M4MCS_setCurrentWriter(pContext, pParams->OutputFileType);
-    M4ERR_CHECK_RETURN(err);
-
-    /* Set video parameters */
-    if( pC->novideo == M4OSA_FALSE )
-    {
-        /**
-        * Check Video Format correctness */
-
-        switch( pParams->OutputVideoFormat )
-        {
-            case M4VIDEOEDITING_kH263:
-                if( pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP4 )
-                    return M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE;
-
-                pC->EncodingVideoFormat = M4ENCODER_kH263;
-                err = M4MCS_setCurrentVideoEncoder(pContext,
-                    pParams->OutputVideoFormat);
-                M4ERR_CHECK_RETURN(err);
-                break;
-
-            case M4VIDEOEDITING_kMPEG4:
-
-                pC->EncodingVideoFormat = M4ENCODER_kMPEG4;
-                err = M4MCS_setCurrentVideoEncoder(pContext,
-                    pParams->OutputVideoFormat);
-                M4ERR_CHECK_RETURN(err);
-                break;
-
-            case M4VIDEOEDITING_kH264:
-
-                pC->EncodingVideoFormat = M4ENCODER_kH264;
-                err = M4MCS_setCurrentVideoEncoder(pContext,
-                    pParams->OutputVideoFormat);
-                M4ERR_CHECK_RETURN(err);
-                break;
-
-            case M4VIDEOEDITING_kNullVideo:
-                if( ( pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP4)
-                    && (pC->InputFileProperties.VideoStreamType
-                    == M4VIDEOEDITING_kH263) )
-                    return M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE;
-
-
-                /* Encoder needed for begin cut to generate an I-frame */
-                pC->EncodingVideoFormat = M4ENCODER_kNULL;
-                err = M4MCS_setCurrentVideoEncoder(pContext,
-                    pC->InputFileProperties.VideoStreamType);
-                M4ERR_CHECK_RETURN(err);
-                break;
-
-            default:
-                M4OSA_TRACE1_1("M4MCS_setOutputParams: Undefined output video format (%d),\
-                               returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
-                               pParams->OutputVideoFormat);
-                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-        }
-
-        /**
-        * Check Video frame size correctness */
-        if( M4VIDEOEDITING_kNullVideo == pParams->OutputVideoFormat )
-        {
-            uiFrameWidth =
-                pC->EncodingWidth = pC->InputFileProperties.uiVideoWidth;
-            uiFrameHeight =
-                pC->EncodingHeight = pC->InputFileProperties.uiVideoHeight;
-
-            /**
-            * Set output video profile and level */
-            pC->encodingVideoProfile = pC->InputFileProperties.uiVideoProfile;
-            /** Set the target video level, because input 3gp file may
-             *  have wrong video level value (some encoders do not respect
-             *  level restrictions like video resolution when content is created).
-             **/
-            pC->encodingVideoLevel = pParams->outputVideoLevel;
-
-            // Clip's original width and height may not be
-            // multiple of 16.
-            // Ensure encoding width and height are multiple of 16
-
-            uint32_t remainder = pC->EncodingWidth % 16;
-            if (remainder != 0) {
-                if (remainder >= 8) {
-                    // Roll forward
-                    pC->EncodingWidth =
-                        pC->EncodingWidth + (16-remainder);
-                } else {
-                    // Roll backward
-                    pC->EncodingWidth =
-                        pC->EncodingWidth - remainder;
-                }
-                uiFrameWidth = pC->EncodingWidth;
-            }
-
-            remainder = pC->EncodingHeight % 16;
-            if (remainder != 0) {
-                if (remainder >= 8) {
-                    // Roll forward
-                    pC->EncodingHeight =
-                        pC->EncodingHeight + (16-remainder);
-                } else {
-                    // Roll backward
-                    pC->EncodingHeight =
-                        pC->EncodingHeight - remainder;
-                }
-                uiFrameHeight = pC->EncodingHeight;
-            }
-
-        }
-        else
-        {
-            /**
-            * Set output video profile and level */
-            pC->encodingVideoProfile = pParams->outputVideoProfile;
-            pC->encodingVideoLevel = pParams->outputVideoLevel;
-
-            switch( pParams->OutputVideoFrameSize )
-            {
-                case M4VIDEOEDITING_kSQCIF:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_SQCIF_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_SQCIF_Height;
-                    break;
-
-                case M4VIDEOEDITING_kQQVGA:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_QQVGA_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_QQVGA_Height;
-                    break;
-
-                case M4VIDEOEDITING_kQCIF:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_QCIF_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_QCIF_Height;
-                    break;
-
-                case M4VIDEOEDITING_kQVGA:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_QVGA_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_QVGA_Height;
-                    break;
-
-                case M4VIDEOEDITING_kCIF:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_CIF_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_CIF_Height;
-                    break;
-
-                case M4VIDEOEDITING_kVGA:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_VGA_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_VGA_Height;
-                    break;
-                    /* +PR LV5807 */
-                case M4VIDEOEDITING_kWVGA:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_WVGA_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_WVGA_Height;
-                    break;
-
-                case M4VIDEOEDITING_kNTSC:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_NTSC_Width;
-                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_NTSC_Height;
-                    break;
-                    /* -PR LV5807*/
-                    /* +CR Google */
-                case M4VIDEOEDITING_k640_360:
-                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_640_360_Width;
-                    uiFrameHeight =
-                        pC->EncodingHeight = M4ENCODER_640_360_Height;
-                    break;
-
-                case M4VIDEOEDITING_k854_480:
-                    uiFrameWidth =
-                        pC->EncodingWidth = M4ENCODER_854_480_Width;
-                    uiFrameHeight =
-                        pC->EncodingHeight = M4ENCODER_854_480_Height;
-                    break;
-
-                case M4VIDEOEDITING_k1280_720:
-                    uiFrameWidth =
-                        pC->EncodingWidth = M4ENCODER_1280_720_Width;
-                    uiFrameHeight =
-                        pC->EncodingHeight = M4ENCODER_1280_720_Height;
-                    break;
-
-                case M4VIDEOEDITING_k1080_720:
-                    uiFrameWidth =
-                        pC->EncodingWidth = M4ENCODER_1080_720_Width;
-                    uiFrameHeight =
-                        pC->EncodingHeight = M4ENCODER_1080_720_Height;
-                    break;
-
-                case M4VIDEOEDITING_k960_720:
-                    uiFrameWidth =
-                        pC->EncodingWidth = M4ENCODER_960_720_Width;
-                    uiFrameHeight =
-                        pC->EncodingHeight = M4ENCODER_960_720_Height;
-                    break;
-
-                case M4VIDEOEDITING_k1920_1080:
-                    uiFrameWidth =
-                        pC->EncodingWidth = M4ENCODER_1920_1080_Width;
-                    uiFrameHeight =
-                        pC->EncodingHeight = M4ENCODER_1920_1080_Height;
-                    break;
-                    /* -CR Google */
-                default:
-                    M4OSA_TRACE1_1(
-                        "M4MCS_setOutputParams: Undefined output video frame size \
-                        (%d), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
-                        pParams->OutputVideoFrameSize);
-                    return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
-            }
-        }
-
-        /**
-        * Compute video max au size and max chunck size.
-        * We do it here because it depends on the frame size only, and
-        * because we need it for the file size/video bitrate estimations */
-        pC->uiVideoMaxAuSize =
-            (M4OSA_UInt32)(1.5F *(M4OSA_Float)(uiFrameWidth * uiFrameHeight) \
-            *M4MCS_VIDEO_MIN_COMPRESSION_RATIO);
-        pC->uiVideoMaxChunckSize = (M4OSA_UInt32)(pC->uiVideoMaxAuSize       \
-            *
-            M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO); /**< from max AU size to max Chunck size */
-
-        if( 0 == pC->uiVideoMaxAuSize )
-        {
-            /* Size may be zero in case of null encoding with unrecognized stream */
-            M4OSA_TRACE1_0("M4MCS_setOutputParams: video frame size is 0 returning\
-                           M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE");
-            return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
-        }
-
-
-        /**
-        * Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
-
-        if( M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat )
-        {
-            switch( pParams->OutputVideoFrameSize )
-            {
-                case M4VIDEOEDITING_kSQCIF:
-                case M4VIDEOEDITING_kQCIF:
-                case M4VIDEOEDITING_kCIF:
-                    /* OK */
-                    break;
-
-                default:
-                    M4OSA_TRACE1_0(
-                        "M4MCS_setOutputParams():\
-                        returning M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
-                    return M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
-            }
-        }
-
-        /**
-        * Check Video Frame rate correctness */
-        if( M4VIDEOEDITING_kNullVideo != pParams->OutputVideoFormat )
-        {
-            switch( pParams->OutputVideoFrameRate )
-            {
-                case M4VIDEOEDITING_k5_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k5_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k7_5_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k7_5_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k10_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k10_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k12_5_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k12_5_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k15_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k15_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k20_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k20_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k25_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k25_FPS;
-                    break;
-
-                case M4VIDEOEDITING_k30_FPS:
-                    pC->EncodingVideoFramerate = M4ENCODER_k30_FPS;
-                    break;
-
-                default:
-                    M4OSA_TRACE1_1(
-                        "M4MCS_setOutputParams: Undefined output video frame rate\
-                        (%d), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE",
-                        pParams->OutputVideoFrameRate);
-                    return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE;
-            }
-        }
-
-        /**
-        * Frame rate check for H263 (only dividers of 30 fps (29.97 actually)) */
-        if( M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat )
-        {
-            switch( pC->EncodingVideoFramerate )
-            {
-                case M4ENCODER_k5_FPS:
-                case M4ENCODER_k7_5_FPS:
-                case M4ENCODER_k10_FPS:
-                case M4ENCODER_k15_FPS:
-                case M4ENCODER_k30_FPS:
-                    /* OK */
-                    break;
-
-                default:
-                    M4OSA_TRACE1_0(
-                        "M4MCS_setOutputParams():\
-                        returning M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263");
-                    return M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263;
-            }
-        }
-    }
-
-    /* Set audio parameters */
-    if( pC->noaudio == M4OSA_FALSE )
-    {
-        /**
-        * Check Audio Format correctness */
-        switch( pParams->OutputAudioFormat )
-        {
-            case M4VIDEOEDITING_kAMR_NB:
-
-                err = M4MCS_setCurrentAudioEncoder(pContext,
-                    pParams->OutputAudioFormat);
-                M4ERR_CHECK_RETURN(err);
-
-                pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
-                pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-                pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
-                pC->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
-                break;
-
-            case M4VIDEOEDITING_kAAC:
-
-                err = M4MCS_setCurrentAudioEncoder(pContext,
-                    pParams->OutputAudioFormat);
-                M4ERR_CHECK_RETURN(err);
-
-                pC->AudioEncParams.Format = M4ENCODER_kAAC;
-                pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-
-                switch( pParams->OutputAudioSamplingFrequency )
-                {
-                    case M4VIDEOEDITING_k8000_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k16000_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k22050_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k24000_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k32000_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k44100_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k48000_ASF:
-                        pC->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
-                        break;
-
-                    case M4VIDEOEDITING_k11025_ASF:
-                    case M4VIDEOEDITING_k12000_ASF:
-                    case M4VIDEOEDITING_kDefault_ASF:
-                        break;
-                }
-                    pC->AudioEncParams.ChannelNum =
-                        (pParams->bAudioMono == M4OSA_TRUE) ? \
-                        M4ENCODER_kMono : M4ENCODER_kStereo;
-                    pC->AudioEncParams.SpecifParam.AacParam.Regulation =
-                        M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
-                    /* unused */
-                    pC->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
-                    pC->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
-                    pC->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
-                    pC->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
-                    /* TODO change into highspeed asap */
-                    pC->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
-                        M4OSA_FALSE;
-                    break;
-
-                    /*FlB 26.02.2009: add mp3 as mcs output format, add mp3 encoder*/
-                case M4VIDEOEDITING_kMP3:
-                    err = M4MCS_setCurrentAudioEncoder(pContext,
-                        pParams->OutputAudioFormat);
-                    M4ERR_CHECK_RETURN(err);
-
-                    pC->AudioEncParams.Format = M4ENCODER_kMP3;
-                    pC->AudioEncParams.ChannelNum =
-                        (pParams->bAudioMono == M4OSA_TRUE) ? \
-                        M4ENCODER_kMono : M4ENCODER_kStereo;
-
-                    pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-
-                    switch( pParams->OutputAudioSamplingFrequency )
-                    {
-                        case M4VIDEOEDITING_k8000_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k11025_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k11025Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k12000_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k12000Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k16000_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k22050_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k24000_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k32000_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k44100_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
-                            break;
-
-                        case M4VIDEOEDITING_k48000_ASF:
-                            pC->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
-                            break;
-
-                        case M4VIDEOEDITING_kDefault_ASF:
-                            break;
-                    }
-
-                    break;
-
-                case M4VIDEOEDITING_kNullAudio:
-                    if( pParams->pEffects == M4OSA_NULL || pParams->nbEffects == 0 )
-                    {
-                        /* no encoder needed */
-                        pC->AudioEncParams.Format = M4ENCODER_kAudioNULL;
-                        pC->AudioEncParams.Frequency =
-                            pC->pReaderAudioStream->m_samplingFrequency;
-                        pC->AudioEncParams.ChannelNum =
-                            (pC->pReaderAudioStream->m_nbChannels == 1) ? \
-                            M4ENCODER_kMono : M4ENCODER_kStereo;
-                    }
-                    else
-                    {
-                        pC->AudioEncParams.Frequency =
-                            pC->pReaderAudioStream->m_samplingFrequency;
-                        pC->AudioEncParams.ChannelNum =
-                            (pC->pReaderAudioStream->m_nbChannels == 1) ? \
-                            M4ENCODER_kMono : M4ENCODER_kStereo;
-
-                        switch( pC->InputFileProperties.AudioStreamType )
-                        {
-                            case M4VIDEOEDITING_kAMR_NB:
-                                M4OSA_TRACE3_0(
-                                    "M4MCS_setOutputParams calling \
-                                    M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, AMR");
-                                err = M4MCS_setCurrentAudioEncoder(pContext,
-                                    pC->InputFileProperties.AudioStreamType);
-                                M4ERR_CHECK_RETURN(err);
-
-                                pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
-                                pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-                                pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
-
-                                if( pC->pReaderAudioStream->m_samplingFrequency
-                                    != 8000 )
-                                {
-                                    pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
-                                }
-                                pC->AudioEncParams.SpecifParam.AmrSID =
-                                    M4ENCODER_kAmrNoSID;
-                                break;
-
-                            case M4VIDEOEDITING_kAAC:
-                                M4OSA_TRACE3_0(
-                                    "M4MCS_setOutputParams calling \
-                                    M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, AAC");
-                                err = M4MCS_setCurrentAudioEncoder(pContext,
-                                    pC->InputFileProperties.AudioStreamType);
-                                M4ERR_CHECK_RETURN(err);
-
-                                pC->AudioEncParams.Format = M4ENCODER_kAAC;
-                                pC->AudioEncParams.SpecifParam.AacParam.Regulation =
-                                    M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
-                                pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-                                pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-
-                                switch( pC->pReaderAudioStream->
-                                    m_samplingFrequency )
-                                {
-                                case 16000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k16000Hz;
-                                    break;
-
-                                case 22050:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k22050Hz;
-                                    break;
-
-                                case 24000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k24000Hz;
-                                    break;
-
-                                case 32000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k32000Hz;
-                                    break;
-
-                                case 44100:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k44100Hz;
-                                    break;
-
-                                case 48000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k48000Hz;
-                                    break;
-
-                                default:
-                                    pC->AudioEncParams.Format = M4ENCODER_kAAC;
-                                    break;
-                            }
-                            /* unused */
-                            pC->AudioEncParams.SpecifParam.AacParam.bIS =
-                                M4OSA_FALSE;
-                            pC->AudioEncParams.SpecifParam.AacParam.bMS =
-                                M4OSA_FALSE;
-                            pC->AudioEncParams.SpecifParam.AacParam.bPNS =
-                                M4OSA_FALSE;
-                            pC->AudioEncParams.SpecifParam.AacParam.bTNS =
-                                M4OSA_FALSE;
-                            /* TODO change into highspeed asap */
-                            pC->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
-                                M4OSA_FALSE;
-                            break;
-
-                        case M4VIDEOEDITING_kMP3:
-                            M4OSA_TRACE3_0(
-                                "M4MCS_setOutputParams calling\
-                                M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, MP3");
-                            err = M4MCS_setCurrentAudioEncoder(pContext,
-                                pC->InputFileProperties.AudioStreamType);
-                            M4ERR_CHECK_RETURN(err);
-
-                            pC->AudioEncParams.Format = M4ENCODER_kMP3;
-                            pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-
-                            switch( pC->pReaderAudioStream->
-                                m_samplingFrequency )
-                            {
-                                case 8000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k8000Hz;
-                                    break;
-
-                                case 16000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k16000Hz;
-                                    break;
-
-                                case 22050:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k22050Hz;
-                                    break;
-
-                                case 24000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k24000Hz;
-                                    break;
-
-                                case 32000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k32000Hz;
-                                    break;
-
-                                case 44100:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k44100Hz;
-                                    break;
-
-                                case 48000:
-                                    pC->AudioEncParams.Frequency =
-                                        M4ENCODER_k48000Hz;
-                                    break;
-
-                                default:
-                                    pC->AudioEncParams.Format = M4ENCODER_kMP3;
-                                    break;
-                            }
-                            break;
-
-                        case M4VIDEOEDITING_kEVRC:
-                        case M4VIDEOEDITING_kUnsupportedAudio:
-                        default:
-                            M4OSA_TRACE1_1(
-                                "M4MCS_setOutputParams: Output audio format (%d) is\
-                                incompatible with audio effects, returning \
-                                M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
-                                pC->InputFileProperties.AudioStreamType);
-                            return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
-                        }
-                    }
-                    break;
-                    /* EVRC
-                    //            case M4VIDEOEDITING_kEVRC:
-                    //
-                    //                err = M4MCS_setCurrentAudioEncoder(pContext, pParams->\
-                    //                    OutputAudioFormat);
-                    //                M4ERR_CHECK_RETURN(err);
-                    //
-                    //                pC->AudioEncParams.Format = M4ENCODER_kEVRC;
-                    //                pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-                    //                pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
-                    //                break; */
-
-                default:
-                    M4OSA_TRACE1_1("M4MCS_setOutputParams: Undefined output audio format (%d),\
-                                   returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
-                                   pParams->OutputAudioFormat);
-                    return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
-        }
-    }
-
-    if( pParams->pOutputPCMfile != M4OSA_NULL )
-    {
-        pC->pOutputPCMfile = pParams->pOutputPCMfile;
-
-        /* Open output PCM file */
-        pC->pOsaFileWritPtr->openWrite(&(pC->pOutputPCMfile),
-            pParams->pOutputPCMfile, M4OSA_kFileWrite);
-    }
-    else
-    {
-        pC->pOutputPCMfile = M4OSA_NULL;
-    }
-
-    /*Store media rendering parameter into the internal context*/
-    pC->MediaRendering = pParams->MediaRendering;
-
-    /* Add audio effects*/
-    /*Copy MCS effects structure into internal context*/
-    if( pParams->nbEffects > 0 )
-    {
-        M4OSA_UInt32 j = 0;
-        pC->nbEffects = pParams->nbEffects;
-        pC->pEffects = (M4MCS_EffectSettings *)M4OSA_32bitAlignedMalloc(pC->nbEffects \
-            *sizeof(M4MCS_EffectSettings), M4MCS,
-            (M4OSA_Char *)"Allocation of effects list");
-
-        if( pC->pEffects == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("M4MCS_setOutputParams(): allocation error");
-            return M4ERR_ALLOC;
-        }
-
-        for ( j = 0; j < pC->nbEffects; j++ )
-        {
-            /* Copy effect to "local" structure */
-            memcpy((void *) &(pC->pEffects[j]),
-                (void *) &(pParams->pEffects[j]),
-                sizeof(M4MCS_EffectSettings));
-
-            switch( pC->pEffects[j].AudioEffectType )
-            {
-                case M4MCS_kAudioEffectType_None:
-                    M4OSA_TRACE3_1(
-                        "M4MCS_setOutputParams(): effect type %i is None", j);
-                    pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
-                    pC->pEffects[j].ExtAudioEffectFct = M4OSA_NULL;
-                    break;
-
-                case M4MCS_kAudioEffectType_FadeIn:
-                    M4OSA_TRACE3_1(
-                        "M4MCS_setOutputParams(): effect type %i is FadeIn", j);
-                    pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
-                    pC->pEffects[j].ExtAudioEffectFct =
-                        M4MCS_editAudioEffectFct_FadeIn;
-                    break;
-
-                case M4MCS_kAudioEffectType_FadeOut:
-                    M4OSA_TRACE3_1(
-                        "M4MCS_setOutputParams(): effect type %i is FadeOut",
-                        j);
-                    pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
-                    pC->pEffects[j].ExtAudioEffectFct =
-                        M4MCS_editAudioEffectFct_FadeOut;
-                    break;
-
-                case M4MCS_kAudioEffectType_External:
-                    M4OSA_TRACE3_1(
-                        "M4MCS_setOutputParams(): effect type %i is External",
-                        j);
-
-                    if( pParams->pEffects != M4OSA_NULL )
-                    {
-                        if( pParams->pEffects[j].ExtAudioEffectFct
-                            == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_1("M4MCS_setOutputParams(): no external effect function\
-                                           associated to external effect number %i", j);
-                            return M4ERR_PARAMETER;
-                        }
-                        pC->pEffects[j].pExtAudioEffectFctCtxt =
-                            pParams->pEffects[j].pExtAudioEffectFctCtxt;
-
-                        pC->pEffects[j].ExtAudioEffectFct =
-                            pParams->pEffects[j].ExtAudioEffectFct;
-                    }
-
-                    break;
-
-                default:
-                    M4OSA_TRACE1_0(
-                        "M4MCS_setOutputParams(): effect type not recognized");
-                    return M4ERR_PARAMETER;
-            }
-        }
-    }
-    else
-    {
-        pC->nbEffects = 0;
-        pC->pEffects = M4OSA_NULL;
-    }
-
-    /**
-    * Update state automaton */
-    pC->State = M4MCS_kState_SET;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_setOutputParams(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
- * @brief   Set the values of the encoding parameters
- * @note    Must be called before M4MCS_checkParamsAndStart().
- * @param   pContext           (IN) MCS context
- * @param   pRates             (IN) Transcoding parameters
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac, 12.2
- *                                            for amr, 8 for mp3)
- * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equals
- * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than the input clip
- *                                                     duration
- * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
- * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output file at given
- *                                             bitrates
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW:  Video bitrate too low
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_setEncodingParams( M4MCS_Context pContext,
-                                  M4MCS_EncodingParams *pRates )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4OSA_UInt32 j = 0;
-
-    M4OSA_TRACE2_2(
-        "M4MCS_setEncodingParams called with pContext=0x%x, pRates=0x%x",
-        pContext, pRates);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_setEncodingParams: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pRates), M4ERR_PARAMETER,
-        "M4MCS_setEncodingParams: pRates is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicSetEncodingParams(pC, pRates);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    if( M4MCS_kState_SET != pC->State )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_setEncodingParams(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Set given values */
-    pC->uiVideoBitrate = pRates->OutputVideoBitrate;
-    pC->uiAudioBitrate = pRates->OutputAudioBitrate;
-    pC->uiBeginCutTime = pRates->BeginCutTime;
-    pC->uiEndCutTime = pRates->EndCutTime;
-    pC->uiMaxFileSize = pRates->OutputFileSize;
-
-    /**
-    * Check begin cut time validity */
-    if( pC->uiBeginCutTime >= pC->InputFileProperties.uiClipDuration )
-    {
-        M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin cut larger than duration (%d>%d),\
-                       returning M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
-                       pC->uiBeginCutTime, pC->InputFileProperties.uiClipDuration);
-        return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
-    }
-
-    /**
-    * If end cut time is too large, we set it to the clip duration */
-    if( pC->uiEndCutTime > pC->InputFileProperties.uiClipDuration )
-    {
-        pC->uiEndCutTime = pC->InputFileProperties.uiClipDuration;
-    }
-
-    /**
-    * Check end cut time validity */
-    if( pC->uiEndCutTime > 0 )
-    {
-        if( pC->uiEndCutTime < pC->uiBeginCutTime )
-        {
-            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin cut greater than end cut (%d,%d), \
-                           returning M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT",
-                           pC->uiBeginCutTime, pC->uiEndCutTime);
-            return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT;
-        }
-
-        if( pC->uiEndCutTime == pC->uiBeginCutTime )
-        {
-            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin and End cuts are equal (%d,%d),\
-                           returning M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT",
-                           pC->uiBeginCutTime, pC->uiEndCutTime);
-            return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-        }
-    }
-
-    /**
-    * FlB 2009.03.04: check audio effects start time and duration validity*/
-    for ( j = 0; j < pC->nbEffects; j++ )
-    {
-        M4OSA_UInt32 outputEndCut = pC->uiEndCutTime;
-
-        if( pC->uiEndCutTime == 0 )
-        {
-            outputEndCut = pC->InputFileProperties.uiClipDuration;
-        }
-
-        if( pC->pEffects[j].uiStartTime > (outputEndCut - pC->uiBeginCutTime) )
-        {
-            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Effects start time is larger than\
-                           duration (%d,%d), returning M4ERR_PARAMETER",
-                           pC->pEffects[j].uiStartTime,
-                           (pC->uiEndCutTime - pC->uiBeginCutTime));
-            return M4ERR_PARAMETER;
-        }
-
-        if( pC->pEffects[j].uiStartTime + pC->pEffects[j].uiDuration > \
-            (outputEndCut - pC->uiBeginCutTime) )
-        {
-            /* Re-adjust the effect duration until the end of the output clip*/
-            pC->pEffects[j].uiDuration = (outputEndCut - pC->uiBeginCutTime) - \
-                pC->pEffects[j].uiStartTime;
-        }
-    }
-
-    /* Check audio bitrate consistency */
-    if( ( pC->noaudio == M4OSA_FALSE)
-        && (pC->AudioEncParams.Format != M4ENCODER_kAudioNULL) )
-    {
-        if( pC->uiAudioBitrate != M4VIDEOEDITING_kUndefinedBitrate )
-        {
-            if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
-            {
-                if( pC->uiAudioBitrate > M4VIDEOEDITING_k12_2_KBPS )
-                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
-
-                if( pC->uiAudioBitrate < M4VIDEOEDITING_k12_2_KBPS )
-                    return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-            }
-            //EVRC
-            //            else if(pC->AudioEncParams.Format == M4ENCODER_kEVRC)
-            //            {
-            //                if(pC->uiAudioBitrate > M4VIDEOEDITING_k9_2_KBPS)
-            //                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
-            //                if(pC->uiAudioBitrate < M4VIDEOEDITING_k9_2_KBPS)
-            //                     return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-            //            }
-            /*FlB 26.02.2009: add mp3 as mcs output format, add mp3 encoder*/
-            else if( pC->AudioEncParams.Format == M4ENCODER_kMP3 )
-            {
-                if( pC->AudioEncParams.Frequency >= M4ENCODER_k32000Hz )
-                {
-                    /*Mpeg layer 1*/
-                    if( pC->uiAudioBitrate > 320000 )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
-
-                    if( pC->uiAudioBitrate < 32000 )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-                }
-                else if( pC->AudioEncParams.Frequency >= M4ENCODER_k16000Hz )
-                {
-                    /*Mpeg layer 2*/
-                    if( pC->uiAudioBitrate > 160000 )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
-
-                    if( ( pC->uiAudioBitrate < 8000
-                        && pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
-                        || (pC->uiAudioBitrate < 16000
-                        && pC->AudioEncParams.ChannelNum
-                        == M4ENCODER_kStereo) )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-                }
-                else if( pC->AudioEncParams.Frequency == M4ENCODER_k8000Hz
-                    || pC->AudioEncParams.Frequency == M4ENCODER_k11025Hz
-                    || pC->AudioEncParams.Frequency == M4ENCODER_k12000Hz )
-                {
-                    /*Mpeg layer 2.5*/
-                    if( pC->uiAudioBitrate > 64000 )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
-
-                    if( ( pC->uiAudioBitrate < 8000
-                        && pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
-                        || (pC->uiAudioBitrate < 16000
-                        && pC->AudioEncParams.ChannelNum
-                        == M4ENCODER_kStereo) )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-                }
-                else
-                {
-                    M4OSA_TRACE1_1("M4MCS_setEncodingParams: MP3 audio sampling frequency error\
-                                   (%d)", pC->AudioEncParams.Frequency);
-                    return M4ERR_PARAMETER;
-                }
-            }
-            else
-            {
-                if( pC->uiAudioBitrate > M4VIDEOEDITING_k192_KBPS )
-                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
-
-                if( pC->AudioEncParams.ChannelNum == M4ENCODER_kMono )
-                {
-                    if( pC->uiAudioBitrate < M4VIDEOEDITING_k16_KBPS )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-                }
-                else
-                {
-                    if( pC->uiAudioBitrate < M4VIDEOEDITING_k32_KBPS )
-                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-                }
-            }
-        }
-    }
-    else
-    {
-        /* NULL audio : copy input file bitrate */
-        pC->uiAudioBitrate = pC->InputFileProperties.uiAudioBitrate;
-    }
-
-    /* Check video bitrate consistency */
-    if( ( pC->novideo == M4OSA_FALSE)
-        && (pC->EncodingVideoFormat != M4ENCODER_kNULL) )
-    {
-        if( pC->uiVideoBitrate != M4VIDEOEDITING_kUndefinedBitrate )
-        {
-            if( pC->uiVideoBitrate > M4VIDEOEDITING_k8_MBPS )
-                return M4MCS_ERR_VIDEOBITRATE_TOO_HIGH;
-
-            if( pC->uiVideoBitrate < M4VIDEOEDITING_k16_KBPS )
-                return M4MCS_ERR_VIDEOBITRATE_TOO_LOW;
-        }
-    }
-    else
-    {
-        /* NULL video : copy input file bitrate */
-        pC->uiVideoBitrate = pC->InputFileProperties.uiVideoBitrate;
-    }
-
-    if( pRates->OutputVideoTimescale <= 30000
-        && pRates->OutputVideoTimescale > 0 )
-    {
-        pC->outputVideoTimescale = pRates->OutputVideoTimescale;
-    }
-
-    /* Check file size */
-    return M4MCS_intCheckMaxFileSize(pC);
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
- * @brief   Get the extended values of the encoding parameters
- * @note    Could be called after M4MCS_setEncodingParams.
- * @param   pContext           (IN) MCS context
- * @param   pRates             (OUT) Transcoding parameters
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Encoding settings would produce a null duration
- *                                             clip = encoding is impossible
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_getExtendedEncodingParams( M4MCS_Context pContext,
-                                          M4MCS_EncodingParams *pRates )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-
-    M4OSA_Int32 minaudiobitrate;
-    M4OSA_Int32 minvideobitrate;
-    M4OSA_Int32 maxcombinedbitrate;
-
-    M4OSA_Int32 calcbitrate;
-
-    M4OSA_UInt32 maxduration;
-    M4OSA_UInt32 calcduration;
-
-    M4OSA_Bool fixed_audio = M4OSA_FALSE;
-    M4OSA_Bool fixed_video = M4OSA_FALSE;
-
-    /* fixed_audio / fixed_video are set M4OSA_TRUE below when the
-    * corresponding stream is absent (noaudio/novideo) or passed through
-    * unmodified (NULL encoder format), i.e. its bitrate is not negotiable
-    * in the adjustment logic that follows. */
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicGetExtendedEncodingParams(pC, pRates);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /* NOTE(review): pRates is populated from the context *before* the state
-    * automaton is checked below, so on an M4ERR_STATE return the caller's
-    * struct has already been partially overwritten. Looks intentional in
-    * the original code; confirm callers do not rely on pRates being
-    * untouched on error. */
-    pRates->OutputVideoBitrate =
-        M4MCS_intGetNearestBitrate(pC->uiVideoBitrate, 0);
-    pRates->OutputAudioBitrate =
-        M4MCS_intGetNearestBitrate(pC->uiAudioBitrate, 0);
-    pRates->BeginCutTime = pC->uiBeginCutTime;
-    pRates->EndCutTime = pC->uiEndCutTime;
-    pRates->OutputFileSize = pC->uiMaxFileSize;
-
-    /**
-    * Check state automaton */
-    if( M4MCS_kState_SET != pC->State )
-    {
-        M4OSA_TRACE1_1("M4MCS_getExtendedEncodingParams(): Wrong State (%d),\
-                       returning M4ERR_STATE", pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Compute min audio bitrate */
-    if( pC->noaudio )
-    {
-        fixed_audio = M4OSA_TRUE;
-        pRates->OutputAudioBitrate = 0;
-        minaudiobitrate = 0;
-    }
-    else if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
-    {
-        /* Audio is passed through: mirror the input bitrate, no adjustment */
-        fixed_audio = M4OSA_TRUE;
-        pRates->OutputAudioBitrate = pC->InputFileProperties.uiAudioBitrate;
-        minaudiobitrate = pC->InputFileProperties.uiAudioBitrate;
-    }
-    else
-    {
-        if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
-        {
-            /* AMR-NB is constrained to the single 12.2 kbps rate here */
-            fixed_audio = M4OSA_TRUE;
-            pRates->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
-            minaudiobitrate = M4VIDEOEDITING_k12_2_KBPS;
-        }
-        //EVRC
-        //        if(pC->AudioEncParams.Format == M4ENCODER_kEVRC)
-        //        {
-        //            fixed_audio = M4OSA_TRUE;
-        //            pRates->OutputAudioBitrate = M4VIDEOEDITING_k9_2_KBPS;
-        //            minaudiobitrate = M4VIDEOEDITING_k9_2_KBPS;
-        //        }
-        /*FlB 26.02.2009: add mp3 as mcs output format*/
-        else if( pC->AudioEncParams.Format == M4ENCODER_kMP3 )
-        {
-            minaudiobitrate =
-                M4VIDEOEDITING_k32_KBPS; /*Default min audio bitrate for MPEG layer 1,
-                                             for both mono and stereo channels*/
-        }
-        else
-        {
-            minaudiobitrate = (pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
-                ? M4VIDEOEDITING_k16_KBPS : M4VIDEOEDITING_k32_KBPS;
-        }
-    }
-
-    /* Check audio bitrate is in the correct range */
-    if( fixed_audio == M4OSA_FALSE )
-    {
-        if( ( pC->uiAudioBitrate > 0)
-            && (pRates->OutputAudioBitrate < minaudiobitrate) )
-        {
-            pRates->OutputAudioBitrate = minaudiobitrate;
-        }
-
-        if( pRates->OutputAudioBitrate > M4VIDEOEDITING_k96_KBPS )
-        {
-            pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
-        }
-    }
-
-    /* Compute min video bitrate */
-    if( pC->novideo )
-    {
-        fixed_video = M4OSA_TRUE;
-        pRates->OutputVideoBitrate = 0;
-        minvideobitrate = 0;
-    }
-    else if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
-    {
-        /* Video is passed through: mirror the input bitrate, no adjustment */
-        fixed_video = M4OSA_TRUE;
-        pRates->OutputVideoBitrate = pC->InputFileProperties.uiVideoBitrate;
-        minvideobitrate = pC->InputFileProperties.uiVideoBitrate;
-    }
-    else
-    {
-        minvideobitrate = M4VIDEOEDITING_k16_KBPS;
-    }
-
-    /* Check video bitrate is in the correct range */
-    if( fixed_video == M4OSA_FALSE )
-    {
-        if( ( pC->uiVideoBitrate > 0)
-            && (pRates->OutputVideoBitrate < minvideobitrate) )
-        {
-            pRates->OutputVideoBitrate = minvideobitrate;
-        }
-        /*+ New Encoder bitrates */
-        if( pRates->OutputVideoBitrate > M4VIDEOEDITING_k8_MBPS )
-        {
-            pRates->OutputVideoBitrate = M4VIDEOEDITING_k8_MBPS;
-        }
-        /*- New Encoder bitrates */
-    }
-
-    /* Check cut times are in correct range */
-    if( ( pRates->BeginCutTime >= pC->InputFileProperties.uiClipDuration)
-        || (( pRates->BeginCutTime >= pRates->EndCutTime)
-        && (pRates->EndCutTime > 0)) )
-    {
-        /* Invalid cut window: fall back to the whole clip */
-        pRates->BeginCutTime = 0;
-        pRates->EndCutTime = 0;
-    }
-
-    /* EndCutTime == 0 means "until end of clip" throughout this function */
-    if( pRates->EndCutTime == 0 )
-        calcduration =
-        pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
-    else
-        calcduration = pRates->EndCutTime - pRates->BeginCutTime;
-
-    /* priority 1 : max file size */
-    if( pRates->OutputFileSize == 0 )
-    {
-        /* we can put maximum values for all undefined parameters */
-        if( pRates->EndCutTime == 0 )
-        {
-            pRates->EndCutTime = pC->InputFileProperties.uiClipDuration;
-        }
-
-        if( ( pRates->OutputAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate)
-            && (fixed_audio == M4OSA_FALSE) )
-        {
-            pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
-        }
-
-        if( ( pRates->OutputVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate)
-            && (fixed_video == M4OSA_FALSE) )
-        {
-            /*+ New Encoder bitrates */
-            pRates->OutputVideoBitrate = M4VIDEOEDITING_k8_MBPS;
-            /*- New Encoder bitrates */
-        }
-    }
-    else
-    {
-        /* compute max duration */
-        /* Max duration (ms) the target file size can hold at the minimum
-        * bitrates; the * 8000.0 factor converts bytes vs. bits/s into
-        * milliseconds. NOTE(review): divides by
-        * (minvideobitrate + minaudiobitrate) -- assumed non-zero here;
-        * confirm behavior when both audio and video are absent. */
-        maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
-            / M4MCS_MOOV_OVER_FILESIZE_RATIO
-            / (minvideobitrate + minaudiobitrate) * 8000.0);
-
-        if( maxduration
-            + pRates->BeginCutTime > pC->InputFileProperties.uiClipDuration )
-        {
-            maxduration =
-                pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
-        }
-
-        /* priority 2 : cut times */
-        if( ( pRates->BeginCutTime > 0) || (pRates->EndCutTime > 0) )
-        {
-            if( calcduration > maxduration )
-            {
-                calcduration = maxduration;
-            }
-
-            if( calcduration == 0 )
-            {
-                return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-            }
-
-            /* Total (audio+video) bitrate budget that fits the requested
-            * file size over the kept duration */
-            maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
-                / M4MCS_MOOV_OVER_FILESIZE_RATIO / (calcduration / 8000.0));
-
-            /* audio and video bitrates */
-            if( ( pRates->OutputAudioBitrate
-                == M4VIDEOEDITING_kUndefinedBitrate)
-                && (pRates->OutputVideoBitrate
-                == M4VIDEOEDITING_kUndefinedBitrate) )
-            {
-                /* set audio = 1/3 and video = 2/3 */
-                if( fixed_audio == M4OSA_FALSE )
-                {
-                    if( pC->novideo )
-                        pRates->OutputAudioBitrate =
-                        M4MCS_intGetNearestBitrate(maxcombinedbitrate, 0);
-                    else
-                        pRates->OutputAudioBitrate =
-                        M4MCS_intGetNearestBitrate(maxcombinedbitrate / 3,
-                        0);
-
-                    if( pRates->OutputAudioBitrate < minaudiobitrate )
-                        pRates->OutputAudioBitrate = minaudiobitrate;
-
-                    if( pRates->OutputAudioBitrate > M4VIDEOEDITING_k96_KBPS )
-                        pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
-                }
-
-                if( fixed_video == M4OSA_FALSE )
-                {
-                    /* Video takes whatever budget audio left over */
-                    pRates->OutputVideoBitrate =
-                        M4MCS_intGetNearestBitrate(maxcombinedbitrate
-                        - pRates->OutputAudioBitrate, 0);
-
-                    if( pRates->OutputVideoBitrate < minvideobitrate )
-                        pRates->OutputVideoBitrate = minvideobitrate;
-
-                    if( pRates->OutputVideoBitrate > M4VIDEOEDITING_k8_MBPS )
-                        pRates->OutputVideoBitrate =
-                        M4VIDEOEDITING_k8_MBPS; /*+ New Encoder
-                                                bitrates */
-                }
-            }
-            else
-            {
-                /* priority 3 : audio bitrate */
-                if( pRates->OutputAudioBitrate
-                    != M4VIDEOEDITING_kUndefinedBitrate )
-                {
-                    /* Step the audio bitrate down the supported ladder until
-                    * audio + minimal video fit the combined budget */
-                    while( ( fixed_audio == M4OSA_FALSE)
-                        && (pRates->OutputAudioBitrate >= minaudiobitrate)
-                        && (pRates->OutputAudioBitrate
-                        + minvideobitrate > maxcombinedbitrate) )
-                    {
-                        pRates->OutputAudioBitrate =
-                            M4MCS_intGetNearestBitrate(
-                            pRates->OutputAudioBitrate, -1);
-                    }
-
-                    if( ( fixed_audio == M4OSA_FALSE)
-                        && (pRates->OutputAudioBitrate < minaudiobitrate) )
-                    {
-                        pRates->OutputAudioBitrate = minaudiobitrate;
-                    }
-
-                    calcbitrate = M4MCS_intGetNearestBitrate(
-                                    maxcombinedbitrate
-                                    - pRates->OutputAudioBitrate, 0);
-
-                    if( calcbitrate < minvideobitrate )
-                        calcbitrate = minvideobitrate;
-
-                    if( calcbitrate > M4VIDEOEDITING_k8_MBPS )
-                        calcbitrate = M4VIDEOEDITING_k8_MBPS;
-
-                    if( ( fixed_video == M4OSA_FALSE)
-                        && (( pRates->OutputVideoBitrate
-                        == M4VIDEOEDITING_kUndefinedBitrate)
-                        || (pRates->OutputVideoBitrate > calcbitrate)) )
-                    {
-                        pRates->OutputVideoBitrate = calcbitrate;
-                    }
-                }
-                else
-                {
-                    /* priority 4 : video bitrate */
-                    if( pRates->OutputVideoBitrate
-                        != M4VIDEOEDITING_kUndefinedBitrate )
-                    {
-                        /* Mirror of the audio case above: step video down
-                        * until video + minimal audio fit the budget */
-                        while( ( fixed_video == M4OSA_FALSE)
-                            && (pRates->OutputVideoBitrate >= minvideobitrate)
-                            && (pRates->OutputVideoBitrate
-                            + minaudiobitrate > maxcombinedbitrate) )
-                        {
-                            pRates->OutputVideoBitrate =
-                                M4MCS_intGetNearestBitrate(
-                                pRates->OutputVideoBitrate, -1);
-                        }
-
-                        if( ( fixed_video == M4OSA_FALSE)
-                            && (pRates->OutputVideoBitrate < minvideobitrate) )
-                        {
-                            pRates->OutputVideoBitrate = minvideobitrate;
-                        }
-
-                        calcbitrate =
-                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
-                            - pRates->OutputVideoBitrate, 0);
-
-                        if( calcbitrate < minaudiobitrate )
-                            calcbitrate = minaudiobitrate;
-
-                        if( calcbitrate > M4VIDEOEDITING_k96_KBPS )
-                            calcbitrate = M4VIDEOEDITING_k96_KBPS;
-
-                        if( ( fixed_audio == M4OSA_FALSE)
-                            && (( pRates->OutputAudioBitrate
-                            == M4VIDEOEDITING_kUndefinedBitrate)
-                            || (pRates->OutputAudioBitrate > calcbitrate)) )
-                        {
-                            pRates->OutputAudioBitrate = calcbitrate;
-                        }
-                    }
-                }
-            }
-        }
-        else
-        {
-            /* No cut window requested: negotiate bitrates/duration from the
-            * file-size constraint alone */
-            /* priority 3 : audio bitrate */
-            if( pRates->OutputAudioBitrate != M4VIDEOEDITING_kUndefinedBitrate )
-            {
-                /* priority 4 : video bitrate */
-                if( pRates->OutputVideoBitrate
-                    != M4VIDEOEDITING_kUndefinedBitrate )
-                {
-                    /* compute max duration */
-                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (pRates->OutputVideoBitrate
-                        + pRates->OutputAudioBitrate) * 8000.0);
-
-                    if( maxduration + pRates->BeginCutTime
-                        > pC->InputFileProperties.uiClipDuration )
-                    {
-                        maxduration = pC->InputFileProperties.uiClipDuration
-                            - pRates->BeginCutTime;
-                    }
-
-                    if( calcduration > maxduration )
-                    {
-                        calcduration = maxduration;
-                    }
-
-                    if( calcduration == 0 )
-                    {
-                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-                    }
-                }
-                else
-                {
-                    /* start with min video bitrate */
-                    pRates->OutputVideoBitrate = minvideobitrate;
-
-                    /* compute max duration */
-                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (pRates->OutputVideoBitrate
-                        + pRates->OutputAudioBitrate) * 8000.0);
-
-                    if( maxduration + pRates->BeginCutTime
-                        > pC->InputFileProperties.uiClipDuration )
-                    {
-                        maxduration = pC->InputFileProperties.uiClipDuration
-                            - pRates->BeginCutTime;
-                    }
-
-                    if( calcduration > maxduration )
-                    {
-                        calcduration = maxduration;
-                    }
-
-                    if( calcduration == 0 )
-                    {
-                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-                    }
-
-                    /* search max possible video bitrate */
-                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (calcduration / 8000.0));
-
-                    /* Climb the bitrate ladder while the pair still fits
-                    * the combined budget */
-                    while( ( fixed_video == M4OSA_FALSE)
-                        && (pRates->OutputVideoBitrate
-                        < M4VIDEOEDITING_k8_MBPS) ) /*+ New Encoder bitrates */
-                    {
-                        calcbitrate = M4MCS_intGetNearestBitrate(
-                            pRates->OutputVideoBitrate, +1);
-
-                        if( calcbitrate
-                            + pRates->OutputAudioBitrate <= maxcombinedbitrate )
-                            pRates->OutputVideoBitrate = calcbitrate;
-                        else
-                            break;
-                    }
-                }
-            }
-            else
-            {
-                /* priority 4 : video bitrate */
-                if( pRates->OutputVideoBitrate
-                    != M4VIDEOEDITING_kUndefinedBitrate )
-                {
-                    /* start with min audio bitrate */
-                    pRates->OutputAudioBitrate = minaudiobitrate;
-
-                    /* compute max duration */
-                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (pRates->OutputVideoBitrate
-                        + pRates->OutputAudioBitrate) * 8000.0);
-
-                    if( maxduration + pRates->BeginCutTime
-                        > pC->InputFileProperties.uiClipDuration )
-                    {
-                        maxduration = pC->InputFileProperties.uiClipDuration
-                            - pRates->BeginCutTime;
-                    }
-
-                    if( calcduration > maxduration )
-                    {
-                        calcduration = maxduration;
-                    }
-
-                    if( calcduration == 0 )
-                    {
-                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-                    }
-
-                    /* search max possible audio bitrate */
-                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (calcduration / 8000.0));
-
-                    /* Climb the audio ladder while the pair still fits */
-                    while( ( fixed_audio == M4OSA_FALSE)
-                        && (pRates->OutputAudioBitrate
-                        < M4VIDEOEDITING_k96_KBPS) )
-                    {
-                        calcbitrate = M4MCS_intGetNearestBitrate(
-                            pRates->OutputAudioBitrate, +1);
-
-                        if( calcbitrate
-                            + pRates->OutputVideoBitrate <= maxcombinedbitrate )
-                            pRates->OutputAudioBitrate = calcbitrate;
-                        else
-                            break;
-                    }
-                }
-                else
-                {
-                    /* Neither bitrate defined: split the budget, audio 1/3
-                    * and video 2/3, after clamping duration to the budget */
-                    /* compute max duration */
-                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (minvideobitrate + minaudiobitrate) * 8000.0);
-
-                    if( maxduration + pRates->BeginCutTime
-                        > pC->InputFileProperties.uiClipDuration )
-                    {
-                        maxduration = pC->InputFileProperties.uiClipDuration
-                            - pRates->BeginCutTime;
-                    }
-
-                    if( calcduration > maxduration )
-                    {
-                        calcduration = maxduration;
-                    }
-
-                    if( calcduration == 0 )
-                    {
-                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-                    }
-
-                    /* set audio = 1/3 and video = 2/3 */
-                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
-                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
-                        / (calcduration / 8000.0));
-
-                    if( fixed_audio == M4OSA_FALSE )
-                    {
-                        if( pC->novideo )
-                            pRates->OutputAudioBitrate =
-                            M4MCS_intGetNearestBitrate(maxcombinedbitrate,
-                            0);
-                        else
-                            pRates->OutputAudioBitrate =
-                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
-                            / 3, 0);
-
-                        if( pRates->OutputAudioBitrate < minaudiobitrate )
-                            pRates->OutputAudioBitrate = minaudiobitrate;
-
-                        if( pRates->OutputAudioBitrate
-                        > M4VIDEOEDITING_k96_KBPS )
-                        pRates->OutputAudioBitrate =
-                        M4VIDEOEDITING_k96_KBPS;
-                    }
-
-                    if( fixed_video == M4OSA_FALSE )
-                    {
-                        pRates->OutputVideoBitrate =
-                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
-                            - pRates->OutputAudioBitrate, 0);
-
-                        if( pRates->OutputVideoBitrate < minvideobitrate )
-                            pRates->OutputVideoBitrate = minvideobitrate;
-
-                        if( pRates->OutputVideoBitrate
-                        > M4VIDEOEDITING_k8_MBPS )
-                        pRates->OutputVideoBitrate =
-                        M4VIDEOEDITING_k8_MBPS; /*+ New Encoder
-                                                bitrates */
-                    }
-                }
-            }
-        }
-    }
-
-    /* recompute max duration with final bitrates */
-    if( pRates->OutputFileSize > 0 )
-    {
-        /* NOTE(review): divides by the sum of the final bitrates -- assumed
-        * non-zero at this point (at least one stream has a bitrate set). */
-        maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
-            / M4MCS_MOOV_OVER_FILESIZE_RATIO
-            / (pRates->OutputVideoBitrate + pRates->OutputAudioBitrate)
-            * 8000.0);
-    }
-    else
-    {
-        maxduration = pC->InputFileProperties.uiClipDuration;
-    }
-
-    if( maxduration
-        + pRates->BeginCutTime > pC->InputFileProperties.uiClipDuration )
-    {
-        maxduration =
-            pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
-    }
-
-    /* Materialize the "until end" sentinel into an explicit end time,
-    * clamped so the kept duration never exceeds maxduration */
-    if( pRates->EndCutTime == 0 )
-    {
-        pRates->EndCutTime = pRates->BeginCutTime + maxduration;
-    }
-    else
-    {
-        calcduration = pRates->EndCutTime - pRates->BeginCutTime;
-
-        if( calcduration > maxduration )
-        {
-            pRates->EndCutTime = pRates->BeginCutTime + maxduration;
-        }
-    }
-
-    /* Should never happen : constraints are too strong */
-    if( pRates->EndCutTime == pRates->BeginCutTime )
-    {
-        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
-    }
-
-    /* estimated resulting file size */
-    pRates->OutputFileSize = (M4OSA_UInt32)(M4MCS_MOOV_OVER_FILESIZE_RATIO
-        * (pRates->OutputVideoBitrate + pRates->OutputAudioBitrate)
-        * (( pRates->EndCutTime - pRates->BeginCutTime) / 8000.0));
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext)
- * @brief   Check parameters to start
- * @note
- * @param   pContext           (IN) MCS context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for
- *                              this function to be called
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH:
- *                              Audio bitrate too high (we limit to 96 kbps)
- * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW:
- *                              Audio bitrate is too low (16 kbps min for aac,
- *                              12.2 for amr, 8 for mp3)
- * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT:
- *                              Begin cut and End cut are equals
- * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION:
- *                              Begin cut time is larger than the input
- *                              clip duration
- * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT:
- *                              End cut time is smaller than begin cut time
- * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL:
- *                              Not enough space to store whole output
- *                              file at given bitrates
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH:
- *                              Video bitrate too high (we limit to 800 kbps)
- * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW:
- *                              Video bitrate too low
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_checkParamsAndStart( M4MCS_Context pContext )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-    M4MCS_EncodingParams VerifyRates;
-    M4OSA_ERR err;
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4MCS_checkParamsAndStart: pContext is M4OSA_NULL");
-
-#ifdef M4MCS_SUPPORT_STILL_PICTURE
-
-    if( pC->m_bIsStillPicture )
-    {
-        /**
-        * Call the corresponding still picture MCS function*/
-        return M4MCS_stillPicCheckParamsAndStart(pC);
-    }
-
-#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
-
-    /**
-    * Check state automaton */
-
-    if( M4MCS_kState_SET != pC->State )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_checkParamsAndStart(): Wrong State (%d), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Audio bitrate should not stay undefined at this point */
-    if( ( pC->noaudio == M4OSA_FALSE)
-        && (pC->AudioEncParams.Format != M4ENCODER_kAudioNULL)
-        && (pC->uiAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate) )
-    {
-        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : undefined audio bitrate");
-        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
-    }
-
-    /* Video bitrate should not stay undefined at this point */
-    if( ( pC->novideo == M4OSA_FALSE)
-        && (pC->EncodingVideoFormat != M4ENCODER_kNULL)
-        && (pC->uiVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate) )
-    {
-        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : undefined video bitrate");
-        return M4MCS_ERR_VIDEOBITRATE_TOO_LOW;
-    }
-
-    /* Set end cut time if necessary (not an error) */
-    if( pC->uiEndCutTime == 0 )
-    {
-        pC->uiEndCutTime = pC->InputFileProperties.uiClipDuration;
-    }
-
-    /* Force a re-set to check validity of parameters */
-    VerifyRates.OutputVideoBitrate = pC->uiVideoBitrate;
-    VerifyRates.OutputAudioBitrate = pC->uiAudioBitrate;
-    VerifyRates.BeginCutTime = pC->uiBeginCutTime;
-    VerifyRates.EndCutTime = pC->uiEndCutTime;
-    VerifyRates.OutputFileSize = pC->uiMaxFileSize;
-    VerifyRates.OutputVideoTimescale = pC->outputVideoTimescale;
-
-    err = M4MCS_setEncodingParams(pContext, &VerifyRates);
-
-    /**
-    * Check parameters consistency */
-    if( err != M4NO_ERROR )
-    {
-        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : invalid parameter found");
-        return err;
-    }
-
-    /**
-    * All is OK : update state automaton */
-    pC->uiEncVideoBitrate = pC->uiVideoBitrate;
-    pC->AudioEncParams.Bitrate = pC->uiAudioBitrate;
-
-#ifdef M4MCS_WITH_FAST_OPEN
-    /**
-    * Remake the open if it was done in fast mode */
-
-    if( M4OSA_TRUE == pC->bFileOpenedInFastMode )
-    {
-        /* Close the file opened in fast mode */
-        M4MCS_intCleanUp_ReadersDecoders(pC);
-
-        pC->State = M4MCS_kState_CREATED;
-
-        /* Reopen it in normal mode */
-        err = M4MCS_open(pContext, pC->pInputFile, pC->InputFileType,
-            pC->pOutputFile, pC->pTemporaryFile);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_checkParamsAndStart : M4MCS_Open returns 0x%x", err);
-            return err;
-        }
-    }
-
-#endif /* M4MCS_WITH_FAST_OPEN */
-
-    pC->State = M4MCS_kState_READY;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intStepSet(M4MCS_InternalContext* pC)
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intStepSet( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4ENCODER_Header *encHeader;
-
-    /**
-    * Prepare the video decoder */
-    err = M4MCS_intPrepareVideoDecoder(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepSet(): M4MCS_intPrepareVideoDecoder() returns 0x%x",
-            err);
-        return err;
-    }
-
-    if( ( pC->InputFileProperties.VideoStreamType == M4VIDEOEDITING_kH264)
-        && (pC->EncodingVideoFormat == M4ENCODER_kNULL) )
-    {
-        pC->bH264Trim = M4OSA_TRUE;
-    }
-
-    /**
-    * Prepare the video encoder */
-    err = M4MCS_intPrepareVideoEncoder(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepSet(): M4MCS_intPrepareVideoEncoder() returns 0x%x",
-            err);
-        return err;
-    }
-
-    if( ( pC->uiBeginCutTime != 0)
-        && (pC->InputFileProperties.VideoStreamType == M4VIDEOEDITING_kH264)
-        && (pC->EncodingVideoFormat == M4ENCODER_kNULL) )
-    {
-
-        err = pC->pVideoEncoderGlobalFcts->pFctSetOption(pC->pViEncCtxt,
-            M4ENCODER_kOptionID_H264ProcessNALUContext,
-            (M4OSA_DataOption)pC->m_pInstance);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1("M4MCS_intStetSet :pFctSetOption failed  (err 0x%x)",
-                err);
-            return err;
-        }
-
-        err = pC->pVideoEncoderGlobalFcts->pFctSetOption(pC->pViEncCtxt,
-            M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr,
-            (M4OSA_DataOption) &H264MCS_ProcessEncodedNALU);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1("M4MCS_intStetSet :pFctSetOption failed  (err 0x%x)",
-                err);
-            return err;
-        }
-
-        err = pC->pVideoEncoderGlobalFcts->pFctGetOption(pC->pViEncCtxt,
-            M4ENCODER_kOptionID_EncoderHeader,
-            (M4OSA_DataOption) &encHeader);
-
-        if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_close: failed to get the encoder header (err 0x%x)",
-                err);
-            /**< no return here, we still have stuff to deallocate after close, even if it fails.*/
-        }
-        else
-        {
-            // Handle DSI first bits
-#define SPS_START_POS 6
-
-            pC->m_pInstance->m_encoderSPSSize =
-                ( encHeader->pBuf[SPS_START_POS] << 8)
-                + encHeader->pBuf[SPS_START_POS + 1];
-            pC->m_pInstance->m_pEncoderSPS =
-                (M4OSA_UInt8 *)(encHeader->pBuf) + SPS_START_POS + 2;
-
-            pC->m_pInstance->m_encoderPPSSize =
-                ( encHeader->pBuf[SPS_START_POS + 3
-                + pC->m_pInstance->m_encoderSPSSize] << 8)
-                + encHeader->pBuf[SPS_START_POS + 4
-                + pC->m_pInstance->m_encoderSPSSize];
-            pC->m_pInstance->m_pEncoderPPS = (M4OSA_UInt8 *)encHeader->pBuf + SPS_START_POS + 5
-                + pC->m_pInstance->m_encoderSPSSize;
-
-            /* Check the DSI integrity */
-            if( encHeader->Size != (pC->m_pInstance->m_encoderSPSSize
-                + pC->m_pInstance->m_encoderPPSSize + 5 + SPS_START_POS) )
-            {
-                M4OSA_TRACE1_3(
-                    "!!! M4MCS_intStepSet ERROR : invalid SPS / PPS %d %d %d",
-                    encHeader->Size, pC->m_pInstance->m_encoderSPSSize,
-                    pC->m_pInstance->m_encoderPPSSize);
-                return M4ERR_PARAMETER;
-            }
-        }
-    }
-
-    /**
-    * Prepare audio processing */
-    err = M4MCS_intPrepareAudioProcessing(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepSet(): M4MCS_intPrepareAudioProcessing() returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Prepare the writer */
-    err = M4MCS_intPrepareWriter(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepSet(): M4MCS_intPrepareWriter() returns 0x%x", err);
-        return err;
-    }
-
-    /**
-    * Jump the audio stream to the begin cut time (all AUs are RAP)
-    * Must be done after the 3gpp writer init, because it may write the first
-    * audio AU in some cases */
-    err = M4MCS_intPrepareAudioBeginCut(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepSet(): M4MCS_intPrepareAudioBeginCut() returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Update state automaton */
-    if( 0 == pC->uiBeginCutTime )
-    {
-        pC->dViDecStartingCts = 0.0;
-        /**
-        * No begin cut, do the encoding */
-        pC->State = M4MCS_kState_PROCESSING;
-    }
-    else
-    {
-        /**
-        * Remember that we must start the decode/encode process at the begin cut time */
-        pC->dViDecStartingCts = (M4OSA_Double)pC->uiBeginCutTime;
-
-        /**
-        * Jumping */
-        pC->State = M4MCS_kState_BEGINVIDEOJUMP;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intStepSet(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intPrepareVideoDecoder(M4MCS_InternalContext* pC);
- * @brief    Prepare the video decoder.
- * @param    pC          (IN) MCS private context
- * @return   M4NO_ERROR  No error
- * @return   M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED
- * @return   Any error returned by an underlaying module
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intPrepareVideoDecoder( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_Void *decoderUserData;
-    M4DECODER_OutputFilter FilterOption;
-
-    if( pC->novideo )
-        return M4NO_ERROR;
-
-    /**
-    * Create the decoder, if it has not been created yet (to get video properties for example) */
-    if( M4OSA_NULL == pC->pViDecCtxt )
-    {
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-        decoderUserData = pC->m_pCurrentVideoDecoderUserData;
-
-#else
-
-        decoderUserData = M4OSA_NULL;
-
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS ? */
-
-        err = pC->m_pVideoDecoder->m_pFctCreate(&pC->pViDecCtxt,
-            &pC->pReaderVideoStream->m_basicProperties, pC->m_pReader,
-            pC->m_pReaderDataIt, &pC->ReaderVideoAU, decoderUserData);
-
-        if( (M4OSA_UInt32)(M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err )
-        {
-            /**
-            * Our decoder is not compatible with H263 profile other than 0.
-            * So it returns this internal error code.
-            * We translate it to our own error code */
-            M4OSA_TRACE1_0("M4MCS_intPrepareVideoDecoder:\
-                           returning M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED");
-            return M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED;
-        }
-        else if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1("M4MCS_intPrepareVideoDecoder:\
-                           m_pVideoDecoder->m_pFctCreate returns 0x%x", err);
-            return err;
-        }
-
-        if( M4VIDEOEDITING_kH264 == pC->InputFileProperties.VideoStreamType )
-        {
-            FilterOption.m_pFilterFunction =
-                (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
-            FilterOption.m_pFilterUserData = M4OSA_NULL;
-            err = pC->m_pVideoDecoder->m_pFctSetOption(pC->pViDecCtxt,
-                M4DECODER_kOptionID_OutputFilter,
-                (M4OSA_DataOption) &FilterOption);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1("M4MCS_intPrepareVideoDecoder:\
-                               m_pVideoDecoder->m_pFctSetOption returns 0x%x", err);
-                return err;
-            }
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intPrepareVideoDecoder(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intPrepareVideoEncoder(M4MCS_InternalContext* pC);
- * @brief    Prepare the video encoder.
- * @param    pC          (IN) MCS private context
- * @return   M4NO_ERROR  No error
- * @return   Any error returned by an underlaying module
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intPrepareVideoEncoder( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4ENCODER_AdvancedParams EncParams; /**< Encoder advanced parameters */
-    M4ENCODER_Params EncParams1;
-    M4OSA_Double dFrameRate;            /**< tmp variable */
-
-    if( pC->novideo )
-        return M4NO_ERROR;
-
-    if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
-    {
-        /* Approximative cts increment */
-        pC->dCtsIncrement = 1000.0 / pC->pReaderVideoStream->m_averageFrameRate;
-
-        if( pC->uiBeginCutTime == 0 )
-        {
-            M4OSA_TRACE3_0(
-                "M4MCS_intPrepareVideoEncoder(): Null encoding, do nothing.");
-            return M4NO_ERROR;
-        }
-        else
-        {
-            M4OSA_TRACE3_0(
-                "M4MCS_intPrepareVideoEncoder(): Null encoding, I-frame defaults.");
-
-            /* Set useful parameters to encode the first I-frame */
-            EncParams.InputFormat = M4ENCODER_kIYUV420;
-            EncParams.videoProfile = pC->encodingVideoProfile;
-            EncParams.videoLevel= pC->encodingVideoLevel;
-
-            switch( pC->InputFileProperties.VideoStreamType )
-            {
-                case M4VIDEOEDITING_kH263:
-                    EncParams.Format = M4ENCODER_kH263;
-                    break;
-
-                case M4VIDEOEDITING_kMPEG4:
-                    EncParams.Format = M4ENCODER_kMPEG4;
-                    break;
-
-                case M4VIDEOEDITING_kH264:
-                    EncParams.Format = M4ENCODER_kH264;
-                    break;
-
-                default:
-                    M4OSA_TRACE1_1("M4MCS_intPrepareVideoEncoder: unknown encoding video format\
-                                   (%d), returning M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED",
-                                   pC->InputFileProperties.VideoStreamType);
-                    return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-            }
-
-            EncParams.FrameWidth = pC->EncodingWidth;
-            EncParams.FrameHeight = pC->EncodingHeight;
-            EncParams.Bitrate = pC->uiEncVideoBitrate;
-            EncParams.bInternalRegulation =
-                M4OSA_FALSE; /* do not constrain the I-frame */
-            EncParams.FrameRate = pC->EncodingVideoFramerate;
-
-            /* Other encoding settings (quite all dummy...) */
-            EncParams.uiHorizontalSearchRange = 0;    /* use default */
-            EncParams.uiVerticalSearchRange = 0;      /* use default */
-            EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */
-            EncParams.uiIVopPeriod = 0;               /* use default */
-            EncParams.uiMotionEstimationTools =
-                0; /* M4V_MOTION_EST_TOOLS_ALL */
-            EncParams.bAcPrediction = M4OSA_TRUE;     /* use AC prediction */
-            EncParams.uiStartingQuantizerValue = 5;   /* initial QP = 5 */
-            EncParams.bDataPartitioning =
-                M4OSA_FALSE; /* no data partitioning */
-
-            /* Rate factor */
-            EncParams.uiTimeScale = pC->InputFileProperties.uiVideoTimeScale;
-            EncParams.uiRateFactor = 1;
-        }
-    }
-    else
-    {
-        M4OSA_TRACE3_0(
-            "M4MCS_intPrepareVideoEncoder(): Normal encoding, set full config.");
-
-        /**
-        * Set encoder shell parameters according to MCS settings */
-        EncParams.Format = pC->EncodingVideoFormat;
-        EncParams.InputFormat = M4ENCODER_kIYUV420;
-        EncParams.videoProfile = pC->encodingVideoProfile;
-        EncParams.videoLevel= pC->encodingVideoLevel;
-
-        /**
-        * Video frame size */
-        EncParams.FrameWidth = pC->EncodingWidth;
-        EncParams.FrameHeight = pC->EncodingHeight;
-
-        /**
-        * Video bitrate has been previously computed */
-        EncParams.Bitrate = pC->uiEncVideoBitrate;
-
-        /**
-        * MCS use the "true" core internal bitrate regulation */
-        EncParams.bInternalRegulation = M4OSA_TRUE;
-
-        /**
-        * Other encoder settings */
-
-        EncParams.uiHorizontalSearchRange = 0;    /* use default */
-        EncParams.uiVerticalSearchRange = 0;      /* use default */
-        EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */
-        EncParams.uiIVopPeriod = 0;               /* use default */
-        EncParams.uiMotionEstimationTools =
-            0; /* M4V_MOTION_EST_TOOLS_ALL */
-        EncParams.bAcPrediction = M4OSA_TRUE;     /* use AC prediction */
-        EncParams.uiStartingQuantizerValue = 10;  /* initial QP = 10 */
-        EncParams.bDataPartitioning =
-            M4OSA_FALSE; /* no data partitioning */
-
-
-        /**
-        * Video encoder frame rate and rate factor */
-        EncParams.FrameRate = pC->EncodingVideoFramerate;
-        EncParams.uiTimeScale = pC->outputVideoTimescale;
-
-        switch( pC->EncodingVideoFramerate )
-        {
-            case M4ENCODER_k5_FPS:
-                dFrameRate = 5.0;
-                break;
-
-            case M4ENCODER_k7_5_FPS:
-                dFrameRate = 7.5;
-                break;
-
-            case M4ENCODER_k10_FPS:
-                dFrameRate = 10.0;
-                break;
-
-            case M4ENCODER_k12_5_FPS:
-                dFrameRate = 12.5;
-                break;
-
-            case M4ENCODER_k15_FPS:
-                dFrameRate = 15.0;
-                break;
-
-            case M4ENCODER_k20_FPS: /**< MPEG-4 only */
-                dFrameRate = 20.0;
-                break;
-
-            case M4ENCODER_k25_FPS: /**< MPEG-4 only */
-                dFrameRate = 25.0;
-                break;
-
-            case M4ENCODER_k30_FPS:
-                dFrameRate = 30.0;
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareVideoEncoder: unknown encoding video frame rate\
-                    (0x%x), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE",
-                    pC->EncodingVideoFramerate);
-                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE;
-        }
-
-        /**
-        * Compute the number of milliseconds between two frames */
-        if( M4ENCODER_kH263 == EncParams.Format )
-        {
-            pC->dCtsIncrement = 1001.0 / dFrameRate;
-        }
-        else /**< MPEG4 or H.264 */
-        {
-            pC->dCtsIncrement = 1000.0 / dFrameRate;
-        }
-    }
-
-    /**
-     * Limit the video bitrate according to encoder profile
-     * and level */
-    err = M4MCS_intLimitBitratePerCodecProfileLevel(&EncParams);
-    if (M4NO_ERROR != err) {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareVideoEncoder: limit bitrate returned err \
-             0x%x", err);
-        return err;
-    }
-
-    /**
-    * Create video encoder */
-    err = pC->pVideoEncoderGlobalFcts->pFctInit(&pC->pViEncCtxt,
-        pC->pWriterDataFcts, \
-        M4MCS_intApplyVPP, pC, pC->pCurrentVideoEncoderExternalAPI, \
-        pC->pCurrentVideoEncoderUserData);
-
-    /**< We put the MCS context in place of the VPP context */
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctInit returns 0x%x",
-            err);
-        return err;
-    }
-
-    pC->encoderState = M4MCS_kEncoderClosed;
-
-    if( M4OSA_TRUE == pC->bH264Trim )
-        //if((M4ENCODER_kNULL == pC->EncodingVideoFormat)
-        //    && (M4VIDEOEDITING_kH264 == pC->InputFileProperties.VideoStreamType))
-    {
-        EncParams1.InputFormat = EncParams.InputFormat;
-        //EncParams1.InputFrameWidth = EncParams.InputFrameWidth;
-        //EncParams1.InputFrameHeight = EncParams.InputFrameHeight;
-        EncParams1.FrameWidth = EncParams.FrameWidth;
-        EncParams1.FrameHeight = EncParams.FrameHeight;
-        EncParams1.videoProfile= EncParams.videoProfile;
-        EncParams1.videoLevel= EncParams.videoLevel;
-        EncParams1.Bitrate = EncParams.Bitrate;
-        EncParams1.FrameRate = EncParams.FrameRate;
-        EncParams1.Format = M4ENCODER_kH264; //EncParams.Format;
-        M4OSA_TRACE1_2("mcs encoder open profile :%d, level %d",
-            EncParams1.videoProfile, EncParams1.videoLevel);
-        err = pC->pVideoEncoderGlobalFcts->pFctOpen(pC->pViEncCtxt,
-            &pC->WriterVideoAU, &EncParams1);
-    }
-    else
-    {
-        M4OSA_TRACE1_2("mcs encoder open Adv profile :%d, level %d",
-            EncParams.videoProfile, EncParams.videoLevel);
-        err = pC->pVideoEncoderGlobalFcts->pFctOpen(pC->pViEncCtxt,
-            &pC->WriterVideoAU, &EncParams);
-    }
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctOpen returns 0x%x",
-            err);
-        return err;
-    }
-
-    pC->encoderState = M4MCS_kEncoderStopped;
-
-    if( M4OSA_NULL != pC->pVideoEncoderGlobalFcts->pFctStart )
-    {
-        err = pC->pVideoEncoderGlobalFcts->pFctStart(pC->pViEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctStart returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    pC->encoderState = M4MCS_kEncoderRunning;
-
-    /******************************/
-    /* Video resize management    */
-    /******************************/
-    /**
-    * Compare video input size and video output size to check if resize is needed */
-    if( ( (M4OSA_UInt32)EncParams.FrameWidth
-        != pC->pReaderVideoStream->m_videoWidth)
-        || ((M4OSA_UInt32)EncParams.FrameHeight
-        != pC->pReaderVideoStream->m_videoHeight) )
-    {
-        /**
-        * Allocate the intermediate video plane that will receive the decoded image before
-         resizing */
-        pC->pPreResizeFrame =
-            (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(3 * sizeof(M4VIFI_ImagePlane),
-            M4MCS, (M4OSA_Char *)"m_pPreResizeFrame");
-
-        if( M4OSA_NULL == pC->pPreResizeFrame )
-        {
-            M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder():\
-                           unable to allocate m_pPreResizeFrame, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-        pC->pPreResizeFrame[0].pac_data = M4OSA_NULL;
-        pC->pPreResizeFrame[1].pac_data = M4OSA_NULL;
-        pC->pPreResizeFrame[2].pac_data = M4OSA_NULL;
-
-        /**
-        * Allocate the Y plane */
-        pC->pPreResizeFrame[0].u_topleft = 0;
-        pC->pPreResizeFrame[0].u_width = pC->pReaderVideoStream->
-            m_videoWidth; /**< input width */
-        pC->pPreResizeFrame[0].u_height = pC->pReaderVideoStream->
-            m_videoHeight; /**< input height */
-        pC->pPreResizeFrame[0].u_stride = pC->
-            pPreResizeFrame[0].u_width; /**< simple case: stride equals width */
-
-        pC->pPreResizeFrame[0].pac_data =
-            (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pC->pPreResizeFrame[0].u_stride \
-            *pC->pPreResizeFrame[0].u_height, M4MCS,
-            (M4OSA_Char *)"m_pPreResizeFrame[0].pac_data");
-
-        if( M4OSA_NULL == pC->pPreResizeFrame[0].pac_data )
-        {
-            M4OSA_TRACE1_0(
-                "M4MCS_intPrepareVideoEncoder():\
-                     unable to allocate m_pPreResizeFrame[0].pac_data, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-        /**
-        * Allocate the U plane */
-        pC->pPreResizeFrame[1].u_topleft = 0;
-        pC->pPreResizeFrame[1].u_width = pC->pPreResizeFrame[0].u_width
-            >> 1; /**< U width is half the Y width */
-        pC->pPreResizeFrame[1].u_height = pC->pPreResizeFrame[0].u_height
-            >> 1; /**< U height is half the Y height */
-        pC->pPreResizeFrame[1].u_stride = pC->
-            pPreResizeFrame[1].u_width; /**< simple case: stride equals width */
-
-        pC->pPreResizeFrame[1].pac_data =
-            (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pC->pPreResizeFrame[1].u_stride \
-            *pC->pPreResizeFrame[1].u_height, M4MCS,
-            (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
-
-        if( M4OSA_NULL == pC->pPreResizeFrame[1].pac_data )
-        {
-            M4OSA_TRACE1_0(
-                "M4MCS_intPrepareVideoEncoder():\
-                 unable to allocate m_pPreResizeFrame[1].pac_data, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-        /**
-        * Allocate the V plane */
-        pC->pPreResizeFrame[2].u_topleft = 0;
-        pC->pPreResizeFrame[2].u_width = pC->
-            pPreResizeFrame[1].u_width; /**< V width equals U width */
-        pC->pPreResizeFrame[2].u_height = pC->
-            pPreResizeFrame[1].u_height; /**< V height equals U height */
-        pC->pPreResizeFrame[2].u_stride = pC->
-            pPreResizeFrame[2].u_width; /**< simple case: stride equals width */
-
-        pC->pPreResizeFrame[2].pac_data =
-            (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pC->pPreResizeFrame[2].u_stride \
-            *pC->pPreResizeFrame[2].u_height, M4MCS,
-            (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
-
-        if( M4OSA_NULL == pC->pPreResizeFrame[2].pac_data )
-        {
-            M4OSA_TRACE1_0(
-                "M4MCS_intPrepareVideoEncoder():\
-                 unable to allocate m_pPreResizeFrame[2].pac_data, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intPrepareVideoEncoder(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intPrepareAudioProcessing(M4MCS_InternalContext* pC);
- * @brief    Prepare the AAC decoder, the SRC and the AMR-NB encoder and the MP3 encoder.
- * @param    pC          (IN) MCS private context
- * @return   M4NO_ERROR  No error
- * @return   Any error returned by an underlaying module
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intPrepareAudioProcessing( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-
-    SSRC_ReturnStatus_en
-        ReturnStatus; /* Function return status                       */
-    LVM_INT16 NrSamplesMin =
-        0; /* Minimal number of samples on the input or on the output */
-    LVM_INT32 ScratchSize; /* The size of the scratch memory               */
-    LVM_INT16
-        *pInputInScratch; /* Pointer to input in the scratch buffer       */
-    LVM_INT16
-        *pOutputInScratch; /* Pointer to the output in the scratch buffer  */
-    SSRC_Params_t ssrcParams;          /* Memory for init parameters                    */
-
-#ifdef MCS_DUMP_PCM_TO_FILE
-
-    file_au_reader = fopen("mcs_ReaderOutput.raw", "wb");
-    file_pcm_decoder = fopen("mcs_DecoderOutput.pcm", "wb");
-    file_pcm_encoder = fopen("mcs_EncoderInput.pcm", "wb");
-
-#endif
-
-    if( pC->noaudio )
-        return M4NO_ERROR;
-
-    if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
-    {
-        M4OSA_TRACE3_0(
-            "M4MCS_intPrepareAudioProcessing(): Null encoding, do nothing.");
-        return M4NO_ERROR;
-    }
-
-    /* ________________________________ */
-    /*|                                |*/
-    /*| Create and "start" the decoder |*/
-    /*|________________________________|*/
-
-    if( M4OSA_NULL == pC->m_pAudioDecoder )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_intPrepareAudioProcessing(): Fails to initiate the audio decoder.");
-        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
-    }
-
-    if( M4OSA_NULL == pC->pAudioDecCtxt )
-    {
-        err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(&pC->pAudioDecCtxt,
-            pC->pReaderAudioStream, pC->m_pCurrentAudioDecoderUserData);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareVideoDecoder: m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    if( M4VIDEOEDITING_kAMR_NB == pC->InputFileProperties.AudioStreamType ) {
-        /* AMR DECODER CONFIGURATION */
-
-        /* nothing specific to do */
-    }
-    else if( M4VIDEOEDITING_kEVRC == pC->InputFileProperties.AudioStreamType ) {
-        /* EVRC DECODER CONFIGURATION */
-
-        /* nothing specific to do */
-    }
-    else if( M4VIDEOEDITING_kMP3 == pC->InputFileProperties.AudioStreamType ) {
-        /* MP3 DECODER CONFIGURATION */
-
-        /* nothing specific to do */
-    }
-    else
-    {
-        /* AAC DECODER CONFIGURATION */
-        M4_AacDecoderConfig AacDecParam;
-
-        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
-        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
-
-        if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
-        {
-            AacDecParam.m_OutputMode = AAC_kMono;
-        }
-        else
-        {
-            /* For this version, we encode only in AAC */
-            if( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
-            {
-                AacDecParam.m_OutputMode = AAC_kMono;
-            }
-            else
-            {
-                AacDecParam.m_OutputMode = AAC_kStereo;
-            }
-        }
-
-        pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
-            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
-    }
-
-    pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
-           M4AD_kOptionID_3gpReaderInterface, (M4OSA_DataOption) pC->m_pReaderDataIt);
-
-    pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
-           M4AD_kOptionID_AudioAU, (M4OSA_DataOption) &pC->ReaderAudioAU);
-
-    if( pC->m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL )
-    {
-        /* Not implemented in all decoders */
-        err = pC->m_pAudioDecoder->m_pFctStartAudioDec(pC->pAudioDecCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareVideoDecoder: m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Allocate output buffer for the audio decoder */
-    pC->InputFileProperties.uiDecodedPcmSize =
-        pC->pReaderAudioStream->m_byteFrameLength
-        * pC->pReaderAudioStream->m_byteSampleSize
-        * pC->pReaderAudioStream->m_nbChannels;
-
-    if( pC->InputFileProperties.uiDecodedPcmSize > 0 )
-    {
-        pC->AudioDecBufferOut.m_bufferSize =
-            pC->InputFileProperties.uiDecodedPcmSize;
-        pC->AudioDecBufferOut.m_dataAddress =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->AudioDecBufferOut.m_bufferSize \
-            *sizeof(short), M4MCS, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
-    }
-
-    if( M4OSA_NULL == pC->AudioDecBufferOut.m_dataAddress )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_intPrepareVideoDecoder():\
-             unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    /* _________________________ */
-    /*|                         |*/
-    /*| Set the SSRC parameters |*/
-    /*|_________________________|*/
-
-    switch( pC->pReaderAudioStream->m_samplingFrequency )
-    {
-        case 8000:
-            ssrcParams.SSRC_Fs_In = LVM_FS_8000;
-            break;
-
-        case 11025:
-            ssrcParams.SSRC_Fs_In = LVM_FS_11025;
-            break;
-
-        case 12000:
-            ssrcParams.SSRC_Fs_In = LVM_FS_12000;
-            break;
-
-        case 16000:
-            ssrcParams.SSRC_Fs_In = LVM_FS_16000;
-            break;
-
-        case 22050:
-            ssrcParams.SSRC_Fs_In = LVM_FS_22050;
-            break;
-
-        case 24000:
-            ssrcParams.SSRC_Fs_In = LVM_FS_24000;
-            break;
-
-        case 32000:
-            ssrcParams.SSRC_Fs_In = LVM_FS_32000;
-            break;
-
-        case 44100:
-            ssrcParams.SSRC_Fs_In = LVM_FS_44100;
-            break;
-
-        case 48000:
-            ssrcParams.SSRC_Fs_In = LVM_FS_48000;
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareVideoDecoder: invalid input AAC sampling frequency (%d Hz),\
-                 returning M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY",
-                pC->pReaderAudioStream->m_samplingFrequency);
-            return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY;
-    }
-
-    if( 1 == pC->pReaderAudioStream->m_nbChannels )
-    {
-        ssrcParams.SSRC_NrOfChannels = LVM_MONO;
-    }
-    else
-    {
-        ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
-    }
-
-    /*FlB 26.02.2009: add mp3 as output format*/
-    if( pC->AudioEncParams.Format == M4ENCODER_kAAC
-        || pC->AudioEncParams.Format == M4ENCODER_kMP3 )
-    {
-        switch( pC->AudioEncParams.Frequency )
-        {
-            case M4ENCODER_k8000Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
-                break;
-
-            case M4ENCODER_k11025Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_11025;
-                break;
-
-            case M4ENCODER_k12000Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_12000;
-                break;
-
-            case M4ENCODER_k16000Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
-                break;
-
-            case M4ENCODER_k22050Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
-                break;
-
-            case M4ENCODER_k24000Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
-                break;
-
-            case M4ENCODER_k32000Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
-                break;
-
-            case M4ENCODER_k44100Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
-                break;
-
-            case M4ENCODER_k48000Hz:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareAudioProcessing: invalid output AAC sampling frequency \
-                    (%d Hz), returning M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY",
-                    pC->AudioEncParams.Frequency);
-                return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY;
-                break;
-        }
-    }
-    else
-    {
-        ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
-    }
-
-
-
-    ReturnStatus = 0;
-
-    switch( ssrcParams.SSRC_Fs_In )
-    {
-        case LVM_FS_8000:
-            ssrcParams.NrSamplesIn = 320;
-            break;
-
-        case LVM_FS_11025:
-            ssrcParams.NrSamplesIn = 441;
-            break;
-
-        case LVM_FS_12000:
-            ssrcParams.NrSamplesIn = 480;
-            break;
-
-        case LVM_FS_16000:
-            ssrcParams.NrSamplesIn = 640;
-            break;
-
-        case LVM_FS_22050:
-            ssrcParams.NrSamplesIn = 882;
-            break;
-
-        case LVM_FS_24000:
-            ssrcParams.NrSamplesIn = 960;
-            break;
-
-        case LVM_FS_32000:
-            ssrcParams.NrSamplesIn = 1280;
-            break;
-
-        case LVM_FS_44100:
-            ssrcParams.NrSamplesIn = 1764;
-            break;
-
-        case LVM_FS_48000:
-            ssrcParams.NrSamplesIn = 1920;
-            break;
-
-        default:
-            ReturnStatus = -1;
-            break;
-    }
-
-    switch( ssrcParams.SSRC_Fs_Out )
-    {
-        case LVM_FS_8000:
-            ssrcParams.NrSamplesOut = 320;
-            break;
-
-        case LVM_FS_11025:
-            ssrcParams.NrSamplesOut = 441;
-            break;
-
-        case LVM_FS_12000:
-            ssrcParams.NrSamplesOut = 480;
-            break;
-
-        case LVM_FS_16000:
-            ssrcParams.NrSamplesOut = 640;
-            break;
-
-        case LVM_FS_22050:
-            ssrcParams.NrSamplesOut = 882;
-            break;
-
-        case LVM_FS_24000:
-            ssrcParams.NrSamplesOut = 960;
-            break;
-
-        case LVM_FS_32000:
-            ssrcParams.NrSamplesOut = 1280;
-            break;
-
-        case LVM_FS_44100:
-            ssrcParams.NrSamplesOut = 1764;
-            break;
-
-        case LVM_FS_48000:
-            ssrcParams.NrSamplesOut = 1920;
-            break;
-
-        default:
-            ReturnStatus = -1;
-            break;
-    }
-
-
-
-    if( ReturnStatus != SSRC_OK )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareAudioProcessing:\
-             Error code %d returned by the SSRC_GetNrSamples function",
-            ReturnStatus);
-        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
-    }
-
-    NrSamplesMin =
-        (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
-        ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
-
-    while( NrSamplesMin < M4MCS_SSRC_MINBLOCKSIZE )
-    { /* Don't take blocks smaller that the minimal block size */
-        ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
-        ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
-        NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
-    }
-
-
-    pC->iSsrcNbSamplIn = (LVM_INT16)(
-        ssrcParams.
-        NrSamplesIn); /* multiplication by NrOfChannels is done below */
-    pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
-
-    /**
-    * Allocate buffer for the input of the SSRC */
-    pC->pSsrcBufferIn =
-        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->iSsrcNbSamplIn * sizeof(short) \
-        *pC->pReaderAudioStream->m_nbChannels, M4MCS,
-        (M4OSA_Char *)"pSsrcBufferIn");
-
-    if( M4OSA_NULL == pC->pSsrcBufferIn )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_intPrepareVideoDecoder():\
-             unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-    pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
-
-    /**
-    * Allocate buffer for the output of the SSRC */
-    pC->pSsrcBufferOut =
-        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->iSsrcNbSamplOut * sizeof(short) \
-        *pC->pReaderAudioStream->m_nbChannels, M4MCS,
-        (M4OSA_Char *)"pSsrcBufferOut");
-
-    if( M4OSA_NULL == pC->pSsrcBufferOut )
-    {
-        M4OSA_TRACE1_0(
-            "M4MCS_intPrepareVideoDecoder():\
-             unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-
-    pC->pLVAudioResampler = LVAudioResamplerCreate(
-        16, /*gInputParams.lvBTChannelCount*/
-        (M4OSA_Int16)pC->InputFileProperties.uiNbChannels/*ssrcParams.SSRC_NrOfChannels*/,
-        (M4OSA_Int32)(pC->AudioEncParams.Frequency)/*ssrcParams.SSRC_Fs_Out*/, 1);
-
-     if( M4OSA_NULL == pC->pLVAudioResampler)
-     {
-         return M4ERR_ALLOC;
-     }
-
-    LVAudiosetSampleRate(pC->pLVAudioResampler,
-        /*gInputParams.lvInSampleRate*/
-        /*pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency*/
-        pC->InputFileProperties.uiSamplingFrequency/*ssrcParams.SSRC_Fs_In*/);
-
-    LVAudiosetVolume(pC->pLVAudioResampler, (M4OSA_Int16)(0x1000 /* 0x7fff */),
-        (M4OSA_Int16)(0x1000/*0x7fff*/));
-
-
-    /* ________________________ */
-    /*|                        |*/
-    /*| Init the audio encoder |*/
-    /*|________________________|*/
-
-    /* Initialise the audio encoder */
-
-    err = pC->pAudioEncoderGlobalFcts->pFctInit(&pC->pAudioEncCtxt,
-        pC->pCurrentAudioEncoderUserData);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareAudioProcessing: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
-            err);
-        return err;
-    }
-
-    /* Open the audio encoder */
-    err = pC->pAudioEncoderGlobalFcts->pFctOpen(pC->pAudioEncCtxt,
-        &pC->AudioEncParams, &pC->pAudioEncDSI,
-        M4OSA_NULL /* no grabbing */);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareAudioProcessing: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
-            err);
-        return err;
-    }
-
-    /* Allocate the input buffer for the audio encoder */
-    switch( pC->AudioEncParams.Format )
-    {
-        case M4ENCODER_kAMRNB:
-            pC->audioEncoderGranularity = M4MCS_PCM_AMR_GRANULARITY_SAMPLES;
-            break;
-
-        case M4ENCODER_kAAC:
-            pC->audioEncoderGranularity = M4MCS_PCM_AAC_GRANULARITY_SAMPLES;
-            break;
-
-            /*FlB 26.02.2009: add mp3 as output format*/
-        case M4ENCODER_kMP3:
-            pC->audioEncoderGranularity = M4MCS_PCM_MP3_GRANULARITY_SAMPLES;
-            break;
-
-         default:
-         break;
-    }
-
-    if( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
-        pC->audioEncoderGranularity *= sizeof(short);
-    else
-        pC->audioEncoderGranularity *= sizeof(short) * 2;
-
-    pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
-    pC->pAudioEncoderBuffer =
-        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->audioEncoderGranularity, M4MCS,
-        (M4OSA_Char *)"pC->pAudioEncoderBuffer");
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intPrepareAudioProcessing(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intPrepareWriter(M4MCS_InternalContext* pC);
- * @brief    Prepare the writer.
- * @param    pC          (IN) MCS private context
- * @return   M4NO_ERROR  No error
- * @return   Any error returned by an underlaying module
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intPrepareWriter( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_UInt32 uiVersion; /**< To write component version in 3gp writer */
-    M4OSA_MemAddr8 pDSI = M4OSA_NULL; /**< To create the Decoder Specific Info */
-    M4SYS_StreamIDValue optionValue; /**< For the setoption calls */
-    M4OSA_UInt32 TargetedFileSize;
-    M4OSA_Bool bMULPPSSPS = M4OSA_FALSE;
-
-    /**
-    * Init the writer */
-    err = pC->pWriterGlobalFcts->pFctOpen(&pC->pWriterContext, pC->pOutputFile,
-        pC->pOsaFileWritPtr, pC->pTemporaryFile, pC->pOsaFileReadPtr);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctOpen returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Link to the writer context in the writer interface */
-    pC->pWriterDataFcts->pWriterContext = pC->pWriterContext;
-
-    /**
-    * Set the product description string in the written file */
-    err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-        M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : MCS    ");
-
-    if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-        != err) ) /* this option may not be implemented by some writers */
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareWriter:\
-             pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Set the product version in the written file */
-    uiVersion =
-        M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
-        + M4VIDEOEDITING_VERSION_REVISION;
-    err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-        M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
-
-    if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-        != err) ) /* this option may not be implemented by some writers */
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareWriter: \
-            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * If there is a video input, allocate and fill the video stream structures for the writer */
-    if( pC->novideo == M4OSA_FALSE )
-    {
-        /**
-        * Fill Video properties structure for the AddStream method */
-        pC->WriterVideoStreamInfo.height = pC->EncodingHeight;
-        pC->WriterVideoStreamInfo.width = pC->EncodingWidth;
-        pC->WriterVideoStreamInfo.fps =
-            0; /**< Not used by the shell/core writer */
-        pC->WriterVideoStreamInfo.Header.pBuf =
-            M4OSA_NULL; /**< Will be updated later */
-        pC->WriterVideoStreamInfo.Header.Size = 0; /**< Will be updated later */
-
-        /**
-        * Fill Video stream description structure for the AddStream method */
-        switch( pC->EncodingVideoFormat )
-        {
-            case M4ENCODER_kMPEG4:
-                pC->WriterVideoStream.streamType = M4SYS_kMPEG_4;
-                break;
-
-            case M4ENCODER_kH263:
-                pC->WriterVideoStream.streamType = M4SYS_kH263;
-                break;
-
-            case M4ENCODER_kH264:
-                pC->WriterVideoStream.streamType = M4SYS_kH264;
-                break;
-
-            case M4ENCODER_kNULL:
-                switch( pC->InputFileProperties.VideoStreamType )
-                {
-                    case M4VIDEOEDITING_kMPEG4:
-                        pC->WriterVideoStream.streamType = M4SYS_kMPEG_4;
-                        break;
-
-                    case M4VIDEOEDITING_kH263:
-                        pC->WriterVideoStream.streamType = M4SYS_kH263;
-                        break;
-
-                    case M4VIDEOEDITING_kH264:
-                        pC->WriterVideoStream.streamType = M4SYS_kH264;
-                        break;
-
-                    default:
-                        M4OSA_TRACE1_1(
-                            "M4MCS_intPrepareWriter: case input=M4ENCODER_kNULL, \
-                            unknown format (0x%x),\
-                             returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
-                            pC->EncodingVideoFormat);
-                        return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-                }
-                break;
-
-            default: /**< It should never happen, already tested */
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareWriter: unknown format (0x%x),\
-                     returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
-                    pC->EncodingVideoFormat);
-                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-        }
-
-        /**
-        * Video bitrate value will be the real value */
-        pC->WriterVideoStream.averageBitrate =
-            (M4OSA_Int32)pC->uiEncVideoBitrate;
-        pC->WriterVideoStream.maxBitrate = (M4OSA_Int32)pC->uiEncVideoBitrate;
-
-        /**
-        * most other parameters are "dummy" */
-        pC->WriterVideoStream.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
-        pC->WriterVideoStream.timeScale =
-            0; /**< Not used by the shell/core writer */
-        pC->WriterVideoStream.profileLevel =
-            0; /**< Not used by the shell/core writer */
-        pC->WriterVideoStream.duration =
-            0; /**< Not used by the shell/core writer */
-        pC->WriterVideoStream.decoderSpecificInfoSize =
-            sizeof(M4WRITER_StreamVideoInfos);
-        pC->WriterVideoStream.decoderSpecificInfo =
-            (M4OSA_MemAddr32) &(pC->WriterVideoStreamInfo);
-
-        /**
-        * Update Encoder Header properties for Video stream if needed */
-        if( M4ENCODER_kH263 == pC->EncodingVideoFormat )
-        {
-            /**
-            * Creates the H263 DSI */
-            pC->WriterVideoStreamInfo.Header.Size =
-                7; /**< H263 output DSI is always 7 bytes */
-            pDSI = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(7, M4MCS, (M4OSA_Char
-                *)"pC->WriterVideoStreamInfo.Header.pBuf (DSI H263)");
-
-            if( M4OSA_NULL == pDSI )
-            {
-                M4OSA_TRACE1_0("M4MCS_intPrepareWriter(): unable to allocate pDSI (H263),\
-                               returning M4ERR_ALLOC");
-                return M4ERR_ALLOC;
-            }
-
-            /**
-            * Vendor is NXP Software: N, X, P, S. */
-            pDSI[0] = 'N';
-            pDSI[1] = 'X';
-            pDSI[2] = 'P';
-            pDSI[3] = 'S';
-
-            /**
-            * Decoder version is 0 */
-            pDSI[4] = 0;
-
-            /**
-            * Level is the sixth byte of the DSI. */
-            switch( pC->EncodingWidth )
-            {
-                case M4ENCODER_SQCIF_Width:
-                case M4ENCODER_QCIF_Width:
-                    if( ( pC->uiEncVideoBitrate <= M4ENCODER_k64_KBPS)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
-                    {
-                        pDSI[5] = 10;
-                    }
-                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
-                    {
-                        pDSI[5] = 45;
-                    }
-                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
-                    {
-                        pDSI[5] = 20;
-                    }
-                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k384_KBPS)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
-                    {
-                        pDSI[5] = 30;
-                    }
-                    else if( ( pC->uiEncVideoBitrate
-                        <= M4ENCODER_k800_KBPS/*2048*/)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
-                    {
-                        pDSI[5] = 40;
-                    }
-                    break;
-
-                case M4ENCODER_CIF_Width:
-                    if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
-                    {
-                        pDSI[5] = 20;
-                    }
-                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k384_KBPS)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
-                    {
-                        pDSI[5] = 30;
-                    }
-                    else if( ( pC->uiEncVideoBitrate
-                        <= M4ENCODER_k800_KBPS/*2048*/)
-                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
-                    {
-                        pDSI[5] = 40;
-                    }
-                    break;
-
-                    default:
-                    break;
-            }
-
-            /**
-            * Profile is the seventh byte of the DSI. */
-            pDSI[6] = 0;
-
-            pC->WriterVideoStreamInfo.Header.pBuf = pDSI;
-        }
-        else if( M4ENCODER_kNULL == pC->EncodingVideoFormat )
-        {
-            /* If we copy the stream from the input, we copy its DSI */
-
-            pC->WriterVideoStreamInfo.Header.Size = pC->pReaderVideoStream->
-                m_basicProperties.m_decoderSpecificInfoSize;
-            pC->WriterVideoStreamInfo.Header.pBuf =
-                (M4OSA_MemAddr8)pC->pReaderVideoStream->
-                m_basicProperties.m_pDecoderSpecificInfo;
-
-        }
-        /* otherwise (MPEG4), the DSI will be recovered from the encoder later on. */
-
-        /*+CRLV6775 - H.264 Trimming  */
-        if( pC->bH264Trim == M4OSA_TRUE )
-        {
-            bMULPPSSPS = M4OSA_TRUE;
-            err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-                (M4OSA_UInt32)M4WRITER_kMUL_PPS_SPS,
-                (M4OSA_DataOption) &bMULPPSSPS);
-
-            if( ( M4NO_ERROR != err)
-                && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-                != err) ) /* this option may not be implemented by some writers */
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareWriter:\
-                     pWriterGlobalFcts->pFctSetOption(M4WRITER_kMUL_PPS_SPS) returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-        /*-CRLV6775 - H.264 Trimming  */
-        /**
-        * Add the video stream */
-        err = pC->pWriterGlobalFcts->pFctAddStream(pC->pWriterContext,
-            &pC->WriterVideoStream);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Update AU properties for video stream */
-        pC->WriterVideoAU.stream = &(pC->WriterVideoStream);
-        pC->WriterVideoAU.dataAddress = M4OSA_NULL;
-        pC->WriterVideoAU.size = 0;
-        pC->WriterVideoAU.CTS = 0; /** Reset time */
-        pC->WriterVideoAU.DTS = 0;
-        pC->WriterVideoAU.attribute = AU_RAP;
-        pC->WriterVideoAU.nbFrag = 0; /** No fragment */
-        pC->WriterVideoAU.frag = M4OSA_NULL;
-
-        /**
-        * Set the writer max video AU size */
-        optionValue.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
-        optionValue.value = pC->uiVideoMaxAuSize;
-        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxAUSize,
-            (M4OSA_DataOption) &optionValue);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareWriter: \
-                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Set the writer max video chunk size */
-        optionValue.value = pC->uiVideoMaxChunckSize;
-        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
-            (M4OSA_DataOption) &optionValue);
-
-        if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-            != err) ) /* this option may not be implemented by some writers */
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareWriter:\
-                 pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * If there is an audio input, allocate and fill the audio stream structures for the writer */
-    if( pC->noaudio == M4OSA_FALSE )
-    {
-        M4WRITER_StreamAudioInfos streamAudioInfo;
-
-        streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
-        streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
-        streamAudioInfo.nbChannels = 1;      /**< unused by our shell writer */
-
-        pC->WriterAudioStream.averageBitrate =
-            0; /**< It is not used by the shell, the DSI is taken into account instead */
-        pC->WriterAudioStream.maxBitrate =
-            0; /**< Not used by the shell/core writer */
-
-        /**
-        * Fill Audio stream description structure for the AddStream method */
-        switch( pC->AudioEncParams.Format )
-        {
-            case M4ENCODER_kAMRNB:
-                pC->WriterAudioStream.streamType = M4SYS_kAMR;
-                break;
-
-            case M4ENCODER_kAAC:
-                pC->WriterAudioStream.streamType = M4SYS_kAAC;
-                pC->WriterAudioStream.averageBitrate =
-                    pC->AudioEncParams.Bitrate;
-                pC->WriterAudioStream.maxBitrate = pC->AudioEncParams.Bitrate;
-                break;
-
-                /*FlB 26.02.2009: add mp3 as output format*/
-            case M4ENCODER_kMP3:
-                pC->WriterAudioStream.streamType = M4SYS_kMP3;
-                break;
-
-            case M4ENCODER_kAudioNULL:
-                switch( pC->InputFileProperties.AudioStreamType )
-                {
-                case M4VIDEOEDITING_kAMR_NB:
-                    pC->WriterAudioStream.streamType = M4SYS_kAMR;
-                    break;
-                    /*FlB 26.02.2009: add mp3 as output format*/
-                case M4VIDEOEDITING_kMP3:
-                    pC->WriterAudioStream.streamType = M4SYS_kMP3;
-                    break;
-
-                case M4VIDEOEDITING_kAAC:
-                case M4VIDEOEDITING_kAACplus:
-                case M4VIDEOEDITING_keAACplus:
-                    pC->WriterAudioStream.streamType = M4SYS_kAAC;
-                    pC->WriterAudioStream.averageBitrate =
-                        pC->AudioEncParams.Bitrate;
-                    pC->WriterAudioStream.maxBitrate =
-                        pC->AudioEncParams.Bitrate;
-                    break;
-
-                case M4VIDEOEDITING_kEVRC:
-                    pC->WriterAudioStream.streamType = M4SYS_kEVRC;
-                    break;
-
-                case M4VIDEOEDITING_kNoneAudio:
-                case M4VIDEOEDITING_kPCM:
-                case M4VIDEOEDITING_kNullAudio:
-                case M4VIDEOEDITING_kUnsupportedAudio:
-                    break;
-                }
-                break;
-
-            default: /**< It should never happen, already tested */
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareWriter: \
-                    unknown format (0x%x), returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
-                    pC->AudioEncParams.Format);
-                return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
-        }
-
-        /**
-        * MCS produces only AMR-NB output */
-        pC->WriterAudioStream.streamID = M4MCS_WRITER_AUDIO_STREAM_ID;
-        pC->WriterAudioStream.duration =
-            0; /**< Not used by the shell/core writer */
-        pC->WriterAudioStream.profileLevel =
-            0; /**< Not used by the shell/core writer */
-        pC->WriterAudioStream.timeScale = pC->AudioEncParams.Frequency;
-
-        if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
-        {
-            /* If we copy the stream from the input, we copy its DSI */
-            streamAudioInfo.Header.Size = pC->pReaderAudioStream->
-                m_basicProperties.m_decoderSpecificInfoSize;
-            streamAudioInfo.Header.pBuf =
-                (M4OSA_MemAddr8)pC->pReaderAudioStream->
-                m_basicProperties.m_pDecoderSpecificInfo;
-        }
-        else
-        {
-            if( pC->pAudioEncDSI.pInfo != M4OSA_NULL )
-            {
-                /* Use the DSI given by the encoder open() */
-                streamAudioInfo.Header.Size = pC->pAudioEncDSI.infoSize;
-                streamAudioInfo.Header.pBuf = pC->pAudioEncDSI.pInfo;
-            }
-            else
-            {
-                /* Writer will put a default Philips DSI */
-                streamAudioInfo.Header.Size = 0;
-                streamAudioInfo.Header.pBuf = M4OSA_NULL;
-            }
-        }
-
-        /**
-        * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
-         in the DSI pointer... */
-        pC->WriterAudioStream.decoderSpecificInfo =
-            (M4OSA_MemAddr32) &streamAudioInfo;
-
-        /**
-        * Add the audio stream to the writer */
-        err = pC->pWriterGlobalFcts->pFctAddStream(pC->pWriterContext,
-            &pC->WriterAudioStream);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x",
-                err);
-            return err;
-        }
-
-        /**
-        * Link the AU and the stream */
-        pC->WriterAudioAU.stream = &(pC->WriterAudioStream);
-        pC->WriterAudioAU.dataAddress = M4OSA_NULL;
-        pC->WriterAudioAU.size = 0;
-        pC->WriterAudioAU.CTS = 0; /** Reset time */
-        pC->WriterAudioAU.DTS = 0;
-        pC->WriterAudioAU.attribute = 0;
-        pC->WriterAudioAU.nbFrag = 0; /** No fragment */
-        pC->WriterAudioAU.frag = M4OSA_NULL;
-
-        /**
-        * Set the writer audio max AU size */
-        /* As max bitrate is now 320kbps instead of 128kbps, max AU
-         * size has to be increased adapt the max AU size according to the stream type and the
-         * channels numbers*/
-        /* After tests, a margin of 3 is taken (2 was not enough and raises to memory overwrite)
-         */
-        //pC->uiAudioMaxAuSize = M4MCS_AUDIO_MAX_AU_SIZE;
-        switch( pC->WriterAudioStream.streamType )
-        {
-            case M4SYS_kAMR:
-                pC->uiAudioMaxAuSize = M4MCS_PCM_AMR_GRANULARITY_SAMPLES
-                    * (( pC->InputFileProperties.uiNbChannels
-                    * sizeof(short)) + 3);
-                break;
-
-            case M4SYS_kMP3:
-                pC->uiAudioMaxAuSize = M4MCS_PCM_MP3_GRANULARITY_SAMPLES
-                    * (( pC->InputFileProperties.uiNbChannels
-                    * sizeof(short)) + 3);
-                break;
-
-            case M4SYS_kAAC:
-                pC->uiAudioMaxAuSize = M4MCS_PCM_AAC_GRANULARITY_SAMPLES
-                    * (( pC->InputFileProperties.uiNbChannels
-                    * sizeof(short)) + 3);
-                break;
-                /*case M4SYS_kEVRC:
-                pC->uiAudioMaxAuSize = M4MCS_PCM_EVRC_GRANULARITY_SAMPLES*
-                ((pC->InputFileProperties.uiNbChannels * sizeof(short))+3);
-                break;*/
-            default: /**< It should never happen, already tested */
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareWriter: unknown format (0x%x),\
-                     returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
-                    pC->WriterAudioStream.streamType);
-                return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
-        }
-
-        optionValue.streamID = M4MCS_WRITER_AUDIO_STREAM_ID;
-        optionValue.value = pC->uiAudioMaxAuSize;
-        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxAUSize,
-            (M4OSA_DataOption) &optionValue);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption(audio,\
-                M4WRITER_kMaxAUSize) returns 0x%x",
-                err);
-            return err;
-        }
-
-        optionValue.value = M4MCS_AUDIO_MAX_CHUNK_SIZE;
-        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
-            (M4OSA_DataOption) &optionValue);
-
-        if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-            != err) ) /* this option may not be implemented by some writers */
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption(audio,\
-                M4WRITER_kMaxChunckSize) returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /*
-    * Set the limitation size of the writer */
-    TargetedFileSize = pC->uiMaxFileSize;
-    /* add 1 kB margin */
-    if( TargetedFileSize > 8192 )
-        TargetedFileSize -= 1024;
-
-    err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
-        (M4OSA_UInt32)M4WRITER_kMaxFileSize,
-        (M4OSA_DataOption) &TargetedFileSize);
-
-    if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-        != err) ) /* this option may not be implemented by some writers */
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption\
-            (M4WRITER_kMaxFileSize) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Close the stream registering in order to be ready to write data */
-    err = pC->pWriterGlobalFcts->pFctStartWriting(pC->pWriterContext);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctStartWriting returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intPrepareWriter(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intPrepareAudioBeginCut(M4MCS_InternalContext* pC);
- * @brief    DO the audio begin cut.
- * @param    pC          (IN) MCS private context
- * @return   M4NO_ERROR  No error
- * @return   Any error returned by an underlaying module
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intPrepareAudioBeginCut( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_Int32 iCts;
-    M4OSA_UInt32 uiFrameSize;
-
-    if( pC->noaudio )
-        return M4NO_ERROR;
-
-    /**
-    * Check if an audio begin cut is needed */
-    if( ( M4OSA_NULL == pC->pReaderAudioStream) || (0 == pC->uiBeginCutTime) )
-    {
-        /**
-        * Return with no error */
-        M4OSA_TRACE3_0(
-            "M4MCS_intPrepareAudioBeginCut(): returning M4NO_ERROR (a)");
-        return M4NO_ERROR;
-    }
-
-    /**
-    * Jump at the begin cut time */
-    iCts = pC->uiBeginCutTime;
-    err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
-        (M4_StreamHandler *)pC->pReaderAudioStream, &iCts);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intPrepareAudioBeginCut: m_pFctJump(Audio) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Remember audio begin cut offset */
-    pC->iAudioCtsOffset = iCts;
-
-    /**
-    * AMR-NB & EVRC: there may be many frames per AU.
-    * In that case we need to slice the first AU to keep the 20 ms cut precision */
-    if( ( M4DA_StreamTypeAudioAmrNarrowBand
-        == pC->pReaderAudioStream->m_basicProperties.m_streamType)
-        || (M4DA_StreamTypeAudioEvrc
-        == pC->pReaderAudioStream->m_basicProperties.m_streamType) )
-    {
-        /**
-        * If the next frame CTS is lower than the begin cut time,
-        * we must read the AU and parse its frames to reach the
-        * nearest to the begin cut */
-        if( ( iCts + 20) < (M4OSA_Int32)pC->uiBeginCutTime )
-        {
-            /**
-            * Read the first audio AU after the jump */
-            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderAudioStream,
-                &pC->ReaderAudioAU);
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                M4OSA_TRACE1_0(
-                    "M4MCS_intPrepareAudioBeginCut(): m_pReaderDataIt->m_pFctGetNextAu(audio)\
-                     returns M4WAR_NO_MORE_AU! Returning M4NO_ERROR");
-                return
-                    M4NO_ERROR; /**< no fatal error here, we should be able to pursue */
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intPrepareAudioBeginCut(): m_pReaderDataIt->m_pFctGetNextAu(Audio)\
-                     returns 0x%x",
-                    err);
-                return err;
-            }
-
-            /**
-            * While the next AU has a lower CTS than the begin cut time, we advance to
-            the next frame */
-            while( ( iCts + 20) <= (M4OSA_Int32)pC->uiBeginCutTime )
-            {
-                /**
-                * Get the size of the frame */
-                switch( pC->pReaderAudioStream->m_basicProperties.m_streamType )
-                {
-                    case M4DA_StreamTypeAudioAmrNarrowBand:
-                        uiFrameSize = M4MCS_intGetFrameSize_AMRNB(
-                            pC->ReaderAudioAU.m_dataAddress);
-                        break;
-
-                    case M4DA_StreamTypeAudioEvrc:
-                        uiFrameSize = M4MCS_intGetFrameSize_EVRC(
-                            pC->ReaderAudioAU.m_dataAddress);
-                        break;
-
-                    default:
-                        uiFrameSize = 0;
-                        break;
-                }
-
-                if( 0 == uiFrameSize )
-                {
-                    /**
-                    * Corrupted frame! We get out of this mess!
-                    * We don't want to crash here... */
-                    M4OSA_TRACE1_0(
-                        "M4MCS_intPrepareAudioBeginCut(): \
-                        M4MCS_intGetFrameSize_xxx returns 0! Returning M4NO_ERROR");
-                    return
-                        M4NO_ERROR; /**< no fatal error here, we should be able to pursue */
-                }
-
-                /**
-                * Go to the next frame */
-                pC->ReaderAudioAU.m_dataAddress += uiFrameSize;
-                pC->ReaderAudioAU.m_size -= uiFrameSize;
-
-                /**
-                * Get the CTS of the next frame */
-                iCts += 20; /**< AMR, EVRC frame duration is always 20 ms */
-                pC->ReaderAudioAU.m_CTS = iCts;
-                pC->ReaderAudioAU.m_DTS = iCts;
-            }
-
-            /**
-            * Update the audio begin cut offset */
-            pC->iAudioCtsOffset = iCts;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intPrepareAudioBeginCut(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intStepEncoding(M4MCS_InternalContext* pC, M4OSA_UInt8* pProgress)
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intStepEncoding( M4MCS_InternalContext *pC,
-                                       M4OSA_UInt8 *pProgress )
-{
-    M4OSA_ERR err;
-    M4OSA_UInt32 uiAudioStepCount = 0;
-
-    /* ---------- VIDEO TRANSCODING ---------- */
-
-    if( ( pC->novideo == M4OSA_FALSE) && (M4MCS_kStreamState_STARTED
-        == pC->VideoState) ) /**< If the video encoding is going on */
-    {
-        if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
-        {
-            err = M4MCS_intVideoNullEncoding(pC);
-        }
-        else
-        {
-            err = M4MCS_intVideoTranscoding(pC);
-        }
-
-        /**
-        * No more space, quit properly */
-        if( M4WAR_WRITER_STOP_REQ == err )
-        {
-            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->dViDecCurrentCts
-                - pC->uiBeginCutTime) * 100)
-                / (pC->uiEndCutTime - pC->uiBeginCutTime));
-
-            pC->State = M4MCS_kState_FINISHED;
-
-            /* bad file produced on very short 3gp file */
-            if( pC->dViDecCurrentCts - pC->uiBeginCutTime == 0 )
-            {
-                /* Nothing has been encoded -> bad produced file -> error returned */
-                M4OSA_TRACE2_0(
-                    "M4MCS_intStepEncoding(): video transcoding returns\
-                     M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL");
-                return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL;
-            }
-            else
-            {
-#ifndef M4MCS_AUDIOONLY
-                /* clean AIR context needed to keep media aspect ratio*/
-
-                if( M4OSA_NULL != pC->m_air_context )
-                {
-                    err = M4AIR_cleanUp(pC->m_air_context);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x",
-                            err);
-                        return err;
-                    }
-                    pC->m_air_context = M4OSA_NULL;
-                }
-
-#endif /*M4MCS_AUDIOONLY*/
-
-                M4OSA_TRACE2_0(
-                    "M4MCS_intStepEncoding(): video transcoding returns M4MCS_ERR_NOMORE_SPACE");
-                return M4MCS_ERR_NOMORE_SPACE;
-            }
-        }
-
-        /**< The input plane is null because the input image will be obtained by the
-        VPP filter from the context */
-        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intStepEncoding(): video transcoding returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /* ---------- AUDIO TRANSCODING ---------- */
-
-    if( ( pC->noaudio == M4OSA_FALSE) && (M4MCS_kStreamState_STARTED
-        == pC->AudioState) ) /**< If there is an audio stream */
-    {
-        while(
-            /**< If the video encoding is running, encode audio until we reach video time */
-            ( ( pC->novideo == M4OSA_FALSE)
-            && (M4MCS_kStreamState_STARTED == pC->VideoState)
-            && (pC->ReaderAudioAU.m_CTS
-            + pC->m_audioAUDuration < pC->ReaderVideoAU.m_CTS)) ||
-            /**< If the video encoding is not running, perform 1 step of audio encoding */
-            (( M4MCS_kStreamState_STARTED == pC->AudioState)
-            && (uiAudioStepCount < 1)) )
-        {
-            uiAudioStepCount++;
-
-            /**< check if an adio effect has to be applied*/
-            err = M4MCS_intCheckAudioEffects(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intStepEncoding(): M4MCS_intCheckAudioEffects returns err: 0x%x",
-                    err);
-                return err;
-            }
-
-            if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
-            {
-                err = M4MCS_intAudioNullEncoding(pC);
-            }
-            else /**< Audio transcoding */
-            {
-                err = M4MCS_intAudioTranscoding(pC);
-            }
-
-            /**
-            * No more space, quit properly */
-            if( M4WAR_WRITER_STOP_REQ == err )
-            {
-                *pProgress =
-                    (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
-                    - pC->uiBeginCutTime) * 100)
-                    / (pC->uiEndCutTime - pC->uiBeginCutTime));
-
-                pC->State = M4MCS_kState_FINISHED;
-
-                /* bad file produced on very short 3gp file */
-                if( pC->ReaderAudioAU.m_CTS - pC->uiBeginCutTime == 0 )
-                {
-                    /* Nothing has been encoded -> bad produced file -> error returned */
-                    M4OSA_TRACE2_0(
-                        "M4MCS_intStepEncoding():\
-                         audio transcoding returns M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL");
-                    return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL;
-                }
-                else
-                {
-#ifndef M4MCS_AUDIOONLY
-                    /* clean AIR context needed to keep media aspect ratio*/
-
-                    if( M4OSA_NULL != pC->m_air_context )
-                    {
-                        err = M4AIR_cleanUp(pC->m_air_context);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x",
-                                err);
-                            return err;
-                        }
-                        pC->m_air_context = M4OSA_NULL;
-                    }
-
-#endif /*M4MCS_AUDIOONLY*/
-
-                    M4OSA_TRACE2_0(
-                        "M4MCS_intStepEncoding(): \
-                        audio transcoding returns M4MCS_ERR_NOMORE_SPACE");
-                    return M4MCS_ERR_NOMORE_SPACE;
-                }
-            }
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                pC->AudioState = M4MCS_kStreamState_FINISHED;
-                M4OSA_TRACE3_0(
-                    "M4MCS_intStepEncoding(): audio transcoding returns M4WAR_NO_MORE_AU");
-                break;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intStepEncoding(): audio transcoding returns 0x%x",
-                    err);
-                return err;
-            }
-
-            /**
-            * Check for end cut */
-            /* We absolutely want to have less or same audio duration as video ->
-            (2*pC->m_audioAUDuration) */
-            if( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
-                + (2 *pC->m_audioAUDuration) > pC->uiEndCutTime )
-            {
-                pC->AudioState = M4MCS_kStreamState_FINISHED;
-                break;
-            }
-        }
-    }
-
-    /* ---------- PROGRESS MANAGEMENT ---------- */
-
-    /**
-    * Compute progress */
-    if( pC->novideo )
-    {
-        if( pC->ReaderAudioAU.m_CTS < pC->uiBeginCutTime )
-        {
-            *pProgress = 0;
-        }
-        else
-        {
-            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
-                - pC->uiBeginCutTime) * 100)
-                / (pC->uiEndCutTime - pC->uiBeginCutTime));
-        }
-        //printf(": %6.0f\b\b\b\b\b\b\b\b", pC->ReaderAudioAU.m_CTS);
-
-    }
-    else
-    {
-        if( pC->dViDecCurrentCts < pC->uiBeginCutTime )
-        {
-            *pProgress = 0;
-        }
-        else
-        {
-            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->dViDecCurrentCts
-                - pC->uiBeginCutTime) * 100)
-                / (pC->uiEndCutTime - pC->uiBeginCutTime));
-        }
-        //printf(": %6.0f\b\b\b\b\b\b\b\b", pC->dViDecCurrentCts);
-    }
-
-    /**
-    * Sanity check */
-    if( *pProgress > 99 )
-    {
-        *pProgress = 99;
-    }
-
-    /**
-    * Increment CTS for next step */
-    if( pC->novideo == M4OSA_FALSE )
-    {
-        if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
-        {
-           pC->dViDecCurrentCts +=  1;
-        }
-        else
-        {
-            pC->dViDecCurrentCts += pC->dCtsIncrement;
-        }
-    }
-
-    /**
-    * The transcoding is finished when no stream is being encoded anymore */
-    if( ( ( pC->novideo) || (M4MCS_kStreamState_FINISHED == pC->VideoState))
-        && (( pC->noaudio) || (M4MCS_kStreamState_FINISHED == pC->AudioState)) )
-    {
-        /* the AIR part can only be used when video codecs are compiled*/
-#ifndef M4MCS_AUDIOONLY
-        /* clean AIR context needed to keep media aspect ratio*/
-
-        if( M4OSA_NULL != pC->m_air_context )
-        {
-            err = M4AIR_cleanUp(pC->m_air_context);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x",
-                    err);
-                return err;
-            }
-            pC->m_air_context = M4OSA_NULL;
-        }
-
-#endif /*M4MCS_AUDIOONLY*/
-        /**/
-
-        *pProgress = 100;
-        pC->State = M4MCS_kState_FINISHED;
-        M4OSA_TRACE2_0(
-            "M4MCS_intStepEncoding(): transcoding finished, returning M4MCS_WAR_TRANSCODING_DONE");
-        return M4MCS_WAR_TRANSCODING_DONE;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intStepEncoding(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intStepBeginVideoJump(M4MCS_InternalContext* pC)
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intStepBeginVideoJump( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_Int32 iCts;
-
-    if( pC->novideo )
-    {
-        pC->State = M4MCS_kState_BEGINVIDEODECODE;
-        return M4NO_ERROR;
-    }
-
-    /**
-    * Jump to the previous RAP in the clip (first get the time, then jump) */
-    iCts = (M4OSA_Int32)pC->dViDecStartingCts;
-    err = pC->m_pReader->m_pFctGetPrevRapTime(pC->pReaderContext,
-        (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
-
-    if( M4WAR_READER_INFORMATION_NOT_PRESENT == err )
-    {
-        /* No RAP table, jump backward and predecode */
-        iCts = (M4OSA_Int32)pC->dViDecStartingCts - M4MCS_NO_STSS_JUMP_POINT;
-
-        if( iCts < 0 )
-            iCts = 0;
-    }
-    else if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepBeginVideoJump: m_pFctGetPrevRapTime returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /* + CRLV6775 -H.264 Trimming */
-
-    if( M4OSA_TRUE == pC->bH264Trim )
-    {
-
-        // Save jump time for safety, this fix should be generic
-
-        M4OSA_Int32 iCtsOri = iCts;
-
-
-        err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intStepBeginVideoJump: m_pFctJump(V) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        if( pC->ReaderVideoAU1.m_structSize == 0 )
-        {
-            /**
-            * Initializes an access Unit */
-            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderVideoStream,
-                &pC->ReaderVideoAU1);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
-                    err);
-                return err;
-            }
-            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderVideoStream,
-                &pC->ReaderVideoAU1);
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                M4OSA_TRACE2_0(
-                    "M4MCS_intVideoNullEncoding(): \
-                    m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
-                /* The audio transcoding is finished */
-                pC->VideoState = M4MCS_kStreamState_FINISHED;
-                return err;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intVideoNullEncoding():\
-                     m_pReaderDataIt->m_pFctGetNextAu(video) returns 0x%x",
-                    err);
-                return err;
-            }
-
-            pC->ReaderVideoAU1.m_structSize = 0;
-        }
-
-        err = H264MCS_ProcessSPS_PPS(pC->m_pInstance,
-            (M4OSA_UInt8 *)pC->ReaderVideoAU1.m_dataAddress, pC->ReaderVideoAU1.m_size);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intStepBeginVideoJump: H264MCS_ProcessSPS_PPS returns 0x%x!",
-                err);
-            return err;
-        }
-
-
-        // Restore jump time for safety, this fix should be generic
-
-        iCts = iCtsOri;
-
-
-    }
-    /* - CRLV6775 -H.264 Trimming */
-
-    /**
-    * Decode one step */
-    pC->dViDecCurrentCts = (M4OSA_Double)(iCts + pC->iVideoBeginDecIncr);
-
-    /**
-    * Be sure we don't decode too far */
-    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
-    {
-        pC->dViDecCurrentCts = pC->dViDecStartingCts;
-    }
-
-    /**
-    * Decode at least once with the bJump flag to true */
-    M4OSA_TRACE3_1(
-        "M4VSS3GPP_intClipDecodeVideoUpToCts: Decoding upTo CTS %.3f",
-        pC->dViDecCurrentCts);
-    pC->isRenderDup = M4OSA_FALSE;
-    err =
-        pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &pC->dViDecCurrentCts,
-        M4OSA_TRUE, 0);
-
-    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
-        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepBeginVideoJump: m_pFctDecode returns 0x%x!", err);
-        return err;
-    }
-
-    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
-    {
-        M4OSA_TRACE2_0("Decoding output the same frame as before 1");
-        pC->isRenderDup = M4OSA_TRUE;
-    }
-
-    /**
-    * Increment decoding cts for the next step */
-    pC->dViDecCurrentCts += (M4OSA_Double)pC->iVideoBeginDecIncr;
-
-    /**
-    * Update state automaton */
-    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
-    {
-        /**
-        * Be sure we don't decode too far */
-        pC->dViDecCurrentCts = pC->dViDecStartingCts;
-        pC->State = M4MCS_kState_PROCESSING;
-    }
-    else
-    {
-        pC->State = M4MCS_kState_BEGINVIDEODECODE;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intStepBeginVideoJump(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intStepBeginVideoDecode(M4MCS_InternalContext* pC)
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intStepBeginVideoDecode( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-    M4_MediaTime dDecTarget;
-
-    if( pC->novideo )
-    {
-        pC->State = M4MCS_kState_PROCESSING;
-        return M4NO_ERROR;
-    }
-
-    /**
-    * Decode */
-    dDecTarget = pC->dViDecCurrentCts;
-    M4OSA_TRACE3_1("M4MCS_intStepBeginDecode: Decoding upTo CTS %.3f",
-        pC->dViDecCurrentCts);
-    pC->isRenderDup = M4OSA_FALSE;
-    err = pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &dDecTarget,
-        M4OSA_FALSE, 0);
-
-    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
-        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intStepBeginVideoDecode: m_pFctDecode returns 0x%x!", err);
-        return err;
-    }
-
-    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
-    {
-        M4OSA_TRACE2_0("Decoding output the same frame as before 2");
-        pC->isRenderDup = M4OSA_TRUE;
-    }
-
-    /**
-    * Increment decoding cts for the next step */
-    pC->dViDecCurrentCts += (M4OSA_Double)pC->iVideoBeginDecIncr;
-
-    /**
-    * Update state automaton, if needed */
-    if( ( (M4OSA_UInt32)pC->dViDecCurrentCts > pC->dViDecStartingCts)
-        || (M4WAR_NO_MORE_AU == err) )
-    {
-        /**
-        * Be sure we don't decode too far */
-        pC->dViDecCurrentCts = (M4OSA_Double)pC->dViDecStartingCts;
-        pC->State = M4MCS_kState_PROCESSING;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intStepBeginVideoDecode(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/*****************************/
-/* define AMR silence frames */
-/*****************************/
-
-#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE 13
-#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 160
-
-#ifdef M4VSS3GPP_SILENCE_FRAMES
-
-const M4OSA_UInt8 M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[
-    M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
-    {
-        0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00
-    };
-#else
-
-extern
-const
-M4OSA_UInt8
-M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
-
-#endif
-
-/*****************************/
-/* define AAC silence frames */
-/*****************************/
-
-#define M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE      4
-
-#ifdef M4VSS3GPP_SILENCE_FRAMES
-
-const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[
-    M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE] =
-    {
-        0x00, 0xC8, 0x20, 0x07
-    };
-#else
-
-extern const M4OSA_UInt8
-M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE];
-
-#endif
-
-#define M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE        6
-
-#ifdef M4VSS3GPP_SILENCE_FRAMES
-
-const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[
-    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE] =
-    {
-        0x21, 0x10, 0x03, 0x20, 0x54, 0x1C
-    };
-#else
-
-extern const
-M4OSA_UInt8
-M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE];
-
-#endif
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intAudioNullEncoding(M4MCS_InternalContext* pC)
- * @return   M4NO_ERROR:         No error
- ******************************************************************************
- */
-
-static M4OSA_ERR M4MCS_intAudioNullEncoding( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;
-
-    if( pC->noaudio )
-        return M4NO_ERROR;
-
-    /* Check if all audio frame has been written (happens at begin cut) */
-    if( pC->ReaderAudioAU.m_size == 0 )
-    {
-        /**
-        * Initializes a new AU if needed */
-        if( pC->ReaderAudioAU1.m_structSize == 0 )
-        {
-            /**
-            * Initializes an access Unit */
-            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderAudioStream,
-                &pC->ReaderAudioAU1);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_open(): m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
-                    err);
-                return err;
-            }
-
-            pC->m_pDataAddress1 =
-                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderAudioAU1.m_maxsize,
-                M4MCS, (M4OSA_Char *)"Temporary AU1 buffer");
-
-            if( pC->m_pDataAddress1 == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0(
-                    "M4MCS_intAudioNullEncoding(): allocation error");
-                return M4ERR_ALLOC;
-            }
-
-            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderAudioStream,
-                &pC->ReaderAudioAU1);
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                M4OSA_TRACE2_0(
-                    "M4MCS_intAudioNullEncoding():\
-                     m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
-                /* The audio transcoding is finished */
-                pC->AudioState = M4MCS_kStreamState_FINISHED;
-                return err;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intAudioNullEncoding(): \
-                    m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
-                    err);
-                return err;
-            }
-            /*FB 2009.04.02: PR surnxp#616: Crash in MCS while Audio AU copying ,
-             constant memory reader case*/
-            if( pC->ReaderAudioAU1.m_maxsize
-        > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
-            {
-                /* Constant memory reader case, we need to reallocate the temporary buffers */
-                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU1.m_maxsize);
-                /* pC->m_pDataAddress1 and
-                pC->m_pDataAddress2 must be reallocated at the same time */
-                /* because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize take
-                 maximum value. Then the test "if(pC->ReaderAudioAU?.m_maxsize >
-                  pC->pReaderAudioStream->m_basicProperties.m_maxAUSize)" is never true */
-                /* and the size of the second buffer is never changed. */
-                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU1.m_maxsize);
-                /* pC->m_pDataAddress1 and
-                pC->m_pDataAddress2 must be reallocated at the same time */
-                /* Update stream properties */
-                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
-                    pC->ReaderAudioAU1.m_maxsize;
-            }
-            /**/
-            memcpy((void *)pC->m_pDataAddress1,
-                (void *)pC->ReaderAudioAU1.m_dataAddress,
-                pC->ReaderAudioAU1.m_size);
-        }
-
-        if( pC->ReaderAudioAU2.m_structSize == 0 )
-        {
-            /**
-            * Initializes an access Unit */
-            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderAudioStream,
-                &pC->ReaderAudioAU2);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_open(): m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
-                    err);
-                return err;
-            }
-            pC->m_pDataAddress2 =
-                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderAudioAU2.m_maxsize,
-                M4MCS, (M4OSA_Char *)"Temporary AU buffer");
-
-            if( pC->m_pDataAddress2 == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0(
-                    "M4MCS_intAudioNullEncoding(): allocation error");
-                return M4ERR_ALLOC;
-            }
-        }
-        /**
-        * Read the next audio AU in the input file */
-        if( pC->ReaderAudioAU2.m_CTS > pC->ReaderAudioAU1.m_CTS )
-        {
-            memcpy((void *) &pC->ReaderAudioAU,
-                (void *) &pC->ReaderAudioAU2, sizeof(M4_AccessUnit));
-            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderAudioStream,
-                &pC->ReaderAudioAU1);
-
-            if( pC->ReaderAudioAU1.m_maxsize
-                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
-            {
-                /* Constant memory reader case, we need to reallocate the temporary buffers */
-                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU1.m_maxsize);
-                /*   pC->m_pDataAddress1
-                 * and pC->m_pDataAddress2 must be reallocated at the same time *
-                 * because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize take
-                 * maximum value. Then the test "if(pC->ReaderAudioAU?.m_maxsize >
-                 * pC->pReaderAudioStream->m_basicProperties.m_maxAUSize)" is never true *
-                 * and the size of the second buffer is never changed.
-                 */
-                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU1.m_maxsize);
-                /* pC->m_pDataAddress1 and
-                 * pC->m_pDataAddress2 must be reallocated at the same time
-                 * Update stream properties
-                 */
-                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
-                    pC->ReaderAudioAU1.m_maxsize;
-            }
-            /**/
-            memcpy((void *)pC->m_pDataAddress1,
-                (void *)pC->ReaderAudioAU1.m_dataAddress,
-                pC->ReaderAudioAU1.m_size);
-            pC->m_audioAUDuration =
-                pC->ReaderAudioAU1.m_CTS - pC->ReaderAudioAU2.m_CTS;
-            pC->ReaderAudioAU.m_dataAddress = pC->m_pDataAddress2;
-        }
-        else
-        {
-            memcpy((void *) &pC->ReaderAudioAU,
-                (void *) &pC->ReaderAudioAU1, sizeof(M4_AccessUnit));
-            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-                (M4_StreamHandler *)pC->pReaderAudioStream,
-                &pC->ReaderAudioAU2);
-            /* Crash in MCS while Audio AU copying ,
-             * constant memory reader case
-             */
-            if( pC->ReaderAudioAU2.m_maxsize
-                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
-            {
-                /* Constant memory reader case, we need to reallocate the temporary buffers */
-                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU2.m_maxsize);
-                /* pC->m_pDataAddress1 and
-                 * pC->m_pDataAddress2 must be reallocated at the same time
-                 * because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize take maximum
-                 * value. Then the test "if(pC->ReaderAudioAU?.m_maxsize > pC->pReaderAudioStream->
-                 * m_basicProperties.m_maxAUSize)" is never true
-                 * and the size of the second buffer is never changed.
-                 */
-                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU2.m_maxsize);
-                /* [ END ] 20091008  JFV PR fix surnxpsw#1071: pC->m_pDataAddress1 and
-                 pC->m_pDataAddress2 must be reallocated at the same time */
-                /* Update stream properties */
-                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
-                    pC->ReaderAudioAU2.m_maxsize;
-            }
-            /**/
-            memcpy((void *)pC->m_pDataAddress2,
-                (void *)pC->ReaderAudioAU2.m_dataAddress,
-                pC->ReaderAudioAU2.m_size);
-            pC->m_audioAUDuration =
-                pC->ReaderAudioAU2.m_CTS - pC->ReaderAudioAU1.m_CTS;
-            pC->ReaderAudioAU.m_dataAddress = pC->m_pDataAddress1;
-        }
-
-        if( M4WAR_NO_MORE_AU == err )
-        {
-            M4OSA_TRACE2_0(
-                "M4MCS_intAudioNullEncoding(): \
-                m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
-            /* The audio transcoding is finished */
-            pC->AudioState = M4MCS_kStreamState_FINISHED;
-            return err;
-        }
-        else if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intAudioNullEncoding(): \
-                m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Prepare the writer AU */
-    err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
-        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intAudioNullEncoding(): pWriterDataFcts->pStartAU(Audio) returns 0x%x",
-            err);
-        return err;
-    }
-
-    if( pC->uiAudioAUCount
-        == 0 ) /* If it is the first AU, we set it to silence
-        (else, errors 0x3841, 0x3847 in our AAC decoder) */
-    {
-        if( pC->InputFileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
-            || pC->InputFileProperties.AudioStreamType
-            == M4VIDEOEDITING_kAACplus
-            || pC->InputFileProperties.AudioStreamType
-            == M4VIDEOEDITING_keAACplus )
-        {
-            if( pC->InputFileProperties.uiNbChannels == 1 )
-            {
-                pC->WriterAudioAU.size = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
-                memcpy((void *)pC->WriterAudioAU.dataAddress,
-                    (void *)M4VSS3GPP_AAC_AU_SILENCE_MONO,
-                    pC->WriterAudioAU.size);
-            }
-            else if( pC->InputFileProperties.uiNbChannels == 2 )
-            {
-                pC->WriterAudioAU.size = M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
-                memcpy((void *)pC->WriterAudioAU.dataAddress,
-                    (void *)M4VSS3GPP_AAC_AU_SILENCE_STEREO,
-                    pC->WriterAudioAU.size);
-            }
-            else
-            {
-                /* Must never happen ...*/
-                M4OSA_TRACE1_0(
-                    "M4MCS_intAudioNullEncoding: Bad number of channels in audio input");
-                return M4MCS_ERR_INVALID_INPUT_FILE;
-            }
-        }
-        else if( pC->InputFileProperties.AudioStreamType
-            == M4VIDEOEDITING_kAMR_NB )
-        {
-            pC->WriterAudioAU.size = M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-            memcpy((void *)pC->WriterAudioAU.dataAddress,
-                (void *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048,
-                pC->WriterAudioAU.size);
-            /* Some remaining AMR AU needs to be copied */
-            if( pC->ReaderAudioAU.m_size != 0 )
-            {
-                /* Update Writer AU */
-                pC->WriterAudioAU.size += pC->ReaderAudioAU.m_size;
-                memcpy((void *)(pC->WriterAudioAU.dataAddress
-                    + M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE),
-                    (void *)pC->ReaderAudioAU.m_dataAddress,
-                    pC->ReaderAudioAU.m_size);
-            }
-        }
-        else
-        {
-            /*MP3 case: copy the AU*/
-            M4OSA_TRACE3_1(
-                "M4MCS_intAudioNullEncoding(): Copying audio AU: size=%d",
-                pC->ReaderAudioAU.m_size);
-            memcpy((void *)pC->WriterAudioAU.dataAddress,
-                (void *)pC->ReaderAudioAU.m_dataAddress,
-                pC->ReaderAudioAU.m_size);
-            pC->WriterAudioAU.size = pC->ReaderAudioAU.m_size;
-        }
-    }
-    else
-    {
-        /**
-        * Copy audio data from reader AU to writer AU */
-        M4OSA_TRACE3_1(
-            "M4MCS_intAudioNullEncoding(): Copying audio AU: size=%d",
-            pC->ReaderAudioAU.m_size);
-        memcpy((void *)pC->WriterAudioAU.dataAddress,
-            (void *)pC->ReaderAudioAU.m_dataAddress,
-            pC->ReaderAudioAU.m_size);
-        pC->WriterAudioAU.size = pC->ReaderAudioAU.m_size;
-    }
-
-    /**
-    * Convert CTS unit from milliseconds to timescale */
-    pC->WriterAudioAU.CTS =
-        (M4OSA_Time)((( pC->ReaderAudioAU.m_CTS - pC->iAudioCtsOffset)
-        * (pC->WriterAudioStream.timeScale / 1000.0)));
-
-    if( pC->InputFileProperties.AudioStreamType == M4VIDEOEDITING_kAMR_NB
-        && pC->uiAudioAUCount == 0 )
-    {
-        pC->iAudioCtsOffset -=
-            20; /* Duration of a silence AMR AU, to handle the duration of the added
-                silence frame */
-    }
-    pC->WriterAudioAU.nbFrag = 0;
-    M4OSA_TRACE3_1("M4MCS_intAudioNullEncoding(): audio AU: CTS=%d ms",
-        pC->WriterAudioAU.CTS);
-
-    /**
-    * Write it to the output file */
-    pC->uiAudioAUCount++;
-    err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
-        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intAudioNullEncoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
-            err);
-        return err;
-    }
-
-    /* All the audio has been written */
-    pC->ReaderAudioAU.m_size = 0;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intAudioNullEncoding(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * @brief    Init Audio Transcoding
- * @return   M4NO_ERROR:         No error
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intAudioTranscoding( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err;                        /**< General error */
-
-    M4OSA_UInt32
-        uiBytesDec; /**< Nb of bytes available in the decoder OUT buffer */
-    M4OSA_UInt32
-        uiDecoder2Ssrc_NbBytes; /**< Nb of bytes copied into the ssrc IN buffer */
-
-    int ssrcErr;                          /**< Error while ssrc processing */
-    M4OSA_UInt32 uiSsrcInSize; /**< Size in bytes of ssrc intput buffer */
-    M4OSA_UInt32
-        uiSsrcInRoom; /**< Nb of bytes available in the ssrc IN buffer */
-    M4OSA_MemAddr8
-        pSsrcInput; /**< Pointer to the good buffer location for ssrc input */
-    M4OSA_UInt32 uiSsrcOutSize; /**< Size in bytes of ssrc output buffer */
-    M4OSA_UInt32
-        uiBytesSsrc; /**< Nb of bytes available in the ssrc OUT buffer */
-
-    M4OSA_UInt8
-        needChannelConversion; /**< Flag to indicate if a stereo <-> mono conversion is needed */
-    M4OSA_UInt32
-        uiChannelConvertorCoeff; /**< Multiplicative coefficient if stereo
-                                    <-> mono conversion is applied */
-    M4OSA_MemAddr8 pChannelConvertorInput =
-        M4OSA_NULL; /**< Pointer to the good buffer location for channel convertor input */
-    M4OSA_UInt32 uiChannelConvertorNbSamples =
-        0; /**< Nb of pcm samples to convert in channel convertor */
-    M4OSA_MemAddr8 pChannelConvertorOutput =
-        M4OSA_NULL; /**< Pointer to the good buffer location for channel convertor output */
-
-    M4OSA_Time
-        frameTimeDelta; /**< Duration of the encoded (then written) data */
-    M4OSA_UInt32
-        uiEncoderInRoom; /**< Nb of bytes available in the encoder IN buffer */
-    M4OSA_UInt32
-        uiSsrc2Encoder_NbBytes; /**< Nb of bytes copied from the ssrc OUT buffer */
-    M4OSA_MemAddr8
-        pEncoderInput; /**< Pointer to the good buffer location for encoder input */
-    M4ENCODER_AudioBuffer pEncInBuffer;   /**< Encoder input buffer for api */
-    M4ENCODER_AudioBuffer pEncOutBuffer;  /**< Encoder output buffer for api */
-
-    M4OSA_Int16 *tempBuffOut = M4OSA_NULL;
-    /*FlB 2009.03.04: apply audio effects if an effect is active*/
-    M4OSA_Int8 *pActiveEffectNumber = &(pC->pActiveEffectNumber);
-
-    uint32_t errCode = M4NO_ERROR;
-
-    if( pC->noaudio )
-        return M4NO_ERROR;
-
-    /* _________________ */
-    /*|                 |*/
-    /*| READ AND DECODE |*/
-    /*|_________________|*/
-
-    /* Check if we have to empty the decoder out buffer first */
-    if( M4OSA_NULL != pC->pPosInDecBufferOut )
-    {
-        goto m4mcs_intaudiotranscoding_feed_resampler;
-    }
-
-    err = pC->m_pAudioDecoder->m_pFctStepAudioDec(pC->pAudioDecCtxt,
-        M4OSA_NULL, &pC->AudioDecBufferOut, M4OSA_FALSE);
-
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intAudioTranscoding(): m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
-            err);
-        return err;
-    }
-
-#ifdef MCS_DUMP_PCM_TO_FILE
-
-    fwrite(pC->AudioDecBufferOut.m_dataAddress,
-        pC->AudioDecBufferOut.m_bufferSize, 1, file_pcm_decoder);
-
-#endif
-
-    pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(pC->pAudioDecCtxt,
-           M4AD_kOptionID_GetAudioAUErrCode, (M4OSA_DataOption) &errCode);
-
-    if ( M4WAR_NO_MORE_AU == errCode ) {
-        pC->AudioState = M4MCS_kStreamState_FINISHED;
-            M4OSA_TRACE2_0(
-                "M4MCS_intAudioTranscoding():\
-                 m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
-            return errCode;
-   }
-
-    /* Set the current position in the decoder out buffer */
-    pC->pPosInDecBufferOut = pC->AudioDecBufferOut.m_dataAddress;
-
-    /* ________________ */
-    /*|                |*/
-    /*| FEED RESAMPLER |*/
-    /*|________________|*/
-
-m4mcs_intaudiotranscoding_feed_resampler:
-
-    /* Check if we have to empty the ssrc out buffer first */
-    if( M4OSA_NULL != pC->pPosInSsrcBufferOut )
-    {
-        goto m4mcs_intaudiotranscoding_prepare_input_buffer;
-    }
-
-    /* Compute number of bytes remaining in the decoder buffer */
-    uiSsrcInSize = pC->iSsrcNbSamplIn * sizeof(short)
-        * pC->pReaderAudioStream->m_nbChannels;
-    uiBytesDec = ( pC->AudioDecBufferOut.m_dataAddress
-        + pC->AudioDecBufferOut.m_bufferSize) - pC->pPosInDecBufferOut;
-
-    /* Check if we can feed directly the Ssrc with the decoder out buffer */
-    if( ( pC->pPosInSsrcBufferIn == pC->pSsrcBufferIn)
-        && (uiBytesDec >= uiSsrcInSize) )
-    {
-        pSsrcInput = pC->pPosInDecBufferOut;
-
-        /* update data consumed into decoder buffer after resampling */
-        if( uiBytesDec == uiSsrcInSize )
-            pC->pPosInDecBufferOut = M4OSA_NULL;
-        else
-            pC->pPosInDecBufferOut += uiSsrcInSize;
-
-        goto m4mcs_intaudiotranscoding_do_resampling;
-    }
-
-    /**
-    * Compute remaining space in Ssrc buffer in */
-    uiSsrcInRoom = ( pC->pSsrcBufferIn + uiSsrcInSize) - pC->pPosInSsrcBufferIn;
-
-    /**
-    * Nb of bytes copied is the minimum between nb of bytes remaining in
-    * decoder out buffer and space remaining in ssrc in buffer */
-    uiDecoder2Ssrc_NbBytes =
-        (uiSsrcInRoom < uiBytesDec) ? uiSsrcInRoom : uiBytesDec;
-
-    /**
-    * Copy from the decoder out buffer into the Ssrc in buffer */
-    memcpy((void *)pC->pPosInSsrcBufferIn, (void *)pC->pPosInDecBufferOut,
-        uiDecoder2Ssrc_NbBytes);
-
-    /**
-    * Update the position in the decoder out buffer */
-    pC->pPosInDecBufferOut += uiDecoder2Ssrc_NbBytes;
-
-    /**
-    * Update the position in the Ssrc in buffer */
-    pC->pPosInSsrcBufferIn += uiDecoder2Ssrc_NbBytes;
-
-    /**
-    * Check if the decoder buffer out is empty */
-    if( ( pC->pPosInDecBufferOut - pC->AudioDecBufferOut.m_dataAddress)
-        == (M4OSA_Int32)pC->AudioDecBufferOut.m_bufferSize )
-    {
-        pC->pPosInDecBufferOut = M4OSA_NULL;
-    }
-
-    /* Check if the Ssrc in buffer is ready (= full) */
-    if( ( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn)
-        < (M4OSA_Int32)uiSsrcInSize )
-    {
-        goto m4mcs_intaudiotranscoding_end;
-    }
-
-    pSsrcInput = pC->pSsrcBufferIn;
-
-    /* update data consumed into ssrc buffer in after resampling (empty) */
-    pC->pPosInSsrcBufferIn = pC->pSsrcBufferIn;
-
-    /* ___________________ */
-    /*|                   |*/
-    /*| DO THE RESAMPLING |*/
-    /*|___________________|*/
-
-m4mcs_intaudiotranscoding_do_resampling:
-
-    /**
-    * No need for memcopy, we can feed Ssrc directly with the data in the audio
-    decoder out buffer*/
-
-    ssrcErr = 0;
-
-    if( pC->pReaderAudioStream->m_nbChannels == 1 )
-    {
-        tempBuffOut =
-            (short *)M4OSA_32bitAlignedMalloc((pC->iSsrcNbSamplOut * sizeof(short) * 2
-            * ((*pC).InputFileProperties).uiNbChannels),
-            M4VSS3GPP,(M4OSA_Char *) "tempBuffOut");
-        memset((void *)tempBuffOut, 0,(pC->iSsrcNbSamplOut * sizeof(short) * 2
-            * ((*pC).InputFileProperties).uiNbChannels));
-
-        LVAudioresample_LowQuality((short *)tempBuffOut, (short *)pSsrcInput,
-            pC->iSsrcNbSamplOut, pC->pLVAudioResampler);
-    }
-    else
-    {
-        memset((void *)pC->pSsrcBufferOut, 0, (pC->iSsrcNbSamplOut * sizeof(short)
-            * ((*pC).InputFileProperties).uiNbChannels));
-
-        LVAudioresample_LowQuality((short *)pC->pSsrcBufferOut,
-            (short *)pSsrcInput, pC->iSsrcNbSamplOut, pC->pLVAudioResampler);
-    }
-
-    if( pC->pReaderAudioStream->m_nbChannels == 1 )
-    {
-        From2iToMono_16((short *)tempBuffOut, (short *)pC->pSsrcBufferOut,
-            (short)pC->iSsrcNbSamplOut);
-        free(tempBuffOut);
-    }
-
-
-    if( 0 != ssrcErr )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intAudioTranscoding: SSRC_Process returns 0x%x, \
-            returning M4MCS_ERR_AUDIO_CONVERSION_FAILED",
-            ssrcErr);
-        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
-    }
-
-    pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
-
-    /* ______________________ */
-    /*|                      |*/
-    /*| PREPARE INPUT BUFFER |*/
-    /*|______________________|*/
-
-m4mcs_intaudiotranscoding_prepare_input_buffer:
-
-    /* Set the flag for channel conversion requirement */
-    if( ( pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
-        && (pC->pReaderAudioStream->m_nbChannels == 2) )
-    {
-        needChannelConversion = 1;
-        uiChannelConvertorCoeff = 4;
-    }
-    else if( ( pC->AudioEncParams.ChannelNum == M4ENCODER_kStereo)
-        && (pC->pReaderAudioStream->m_nbChannels == 1) )
-    {
-        needChannelConversion = 2;
-        uiChannelConvertorCoeff = 1;
-    }
-    else
-    {
-        needChannelConversion = 0;
-        uiChannelConvertorCoeff = 2;
-    }
-
-    /* Compute number of bytes remaining in the Ssrc buffer */
-    uiSsrcOutSize = pC->iSsrcNbSamplOut * sizeof(short)
-        * pC->pReaderAudioStream->m_nbChannels;
-    uiBytesSsrc =
-        ( pC->pSsrcBufferOut + uiSsrcOutSize) - pC->pPosInSsrcBufferOut;
-
-    /* Check if the ssrc buffer is full */
-    if( pC->pPosInSsrcBufferOut == pC->pSsrcBufferOut )
-    {
-        uiSsrc2Encoder_NbBytes =
-            pC->audioEncoderGranularity * uiChannelConvertorCoeff / 2;
-
-        /* Check if we can feed directly the encoder with the ssrc out buffer */
-        if( ( pC->pPosInAudioEncoderBuffer == M4OSA_NULL)
-            && (uiBytesSsrc >= uiSsrc2Encoder_NbBytes) )
-        {
-            /* update position in ssrc out buffer after encoding */
-            if( uiBytesSsrc == uiSsrc2Encoder_NbBytes )
-                pC->pPosInSsrcBufferOut = M4OSA_NULL;
-            else
-                pC->pPosInSsrcBufferOut += uiSsrc2Encoder_NbBytes;
-
-            /* mark the encoder buffer ready (= full) */
-            pC->pPosInAudioEncoderBuffer =
-                pC->pAudioEncoderBuffer + pC->audioEncoderGranularity;
-
-            if( needChannelConversion > 0 )
-            {
-                /* channel convertor writes directly into encoder buffer */
-                pEncoderInput = pC->pAudioEncoderBuffer;
-
-                pChannelConvertorInput = pC->pSsrcBufferOut;
-                pChannelConvertorOutput = pC->pAudioEncoderBuffer;
-                uiChannelConvertorNbSamples =
-                    uiSsrc2Encoder_NbBytes / sizeof(short);
-
-                goto m4mcs_intaudiotranscoding_channel_convertor;
-            }
-            else
-            {
-                /* encode directly from ssrc out buffer */
-                pEncoderInput = pC->pSsrcBufferOut;
-
-                goto m4mcs_intaudiotranscoding_encode_and_write;
-            }
-        }
-    }
-
-    /**
-    * Compute remaining space in encoder buffer in */
-    if( pC->pPosInAudioEncoderBuffer == M4OSA_NULL )
-    {
-        pC->pPosInAudioEncoderBuffer = pC->pAudioEncoderBuffer;
-    }
-
-    uiEncoderInRoom = ( pC->pAudioEncoderBuffer + pC->audioEncoderGranularity)
-        - pC->pPosInAudioEncoderBuffer;
-    pEncoderInput = pC->pAudioEncoderBuffer;
-
-    /**
-    * Nb of bytes copied is the minimum between nb of bytes remaining in
-    * decoder out buffer and space remaining in ssrc in buffer */
-    uiSsrc2Encoder_NbBytes =
-        (( uiEncoderInRoom * uiChannelConvertorCoeff / 2) < uiBytesSsrc)
-        ? (uiEncoderInRoom * uiChannelConvertorCoeff / 2) : uiBytesSsrc;
-
-    if( needChannelConversion > 0 )
-    {
-        /* channel convertor writes directly into encoder buffer */
-        pChannelConvertorInput = pC->pPosInSsrcBufferOut;
-        pChannelConvertorOutput = pC->pPosInAudioEncoderBuffer;
-        uiChannelConvertorNbSamples = uiSsrc2Encoder_NbBytes / sizeof(short);
-    }
-    else
-    {
-        /* copy from the ssrc out buffer into the encoder in buffer */
-        memcpy((void *)pC->pPosInAudioEncoderBuffer, (void *)pC->pPosInSsrcBufferOut,
-            uiSsrc2Encoder_NbBytes);
-    }
-
-    /* Update position in ssrc out buffer after encoding */
-    pC->pPosInSsrcBufferOut += uiSsrc2Encoder_NbBytes;
-
-    /* Update the position in the encoder in buffer */
-    pC->pPosInAudioEncoderBuffer +=
-        uiSsrc2Encoder_NbBytes * 2 / uiChannelConvertorCoeff;
-
-    /* Check if the ssrc buffer out is empty */
-    if( ( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut)
-        == (M4OSA_Int32)uiSsrcOutSize )
-    {
-        pC->pPosInSsrcBufferOut = M4OSA_NULL;
-    }
-
-    /* go to next statement */
-    if( needChannelConversion > 0 )
-        goto m4mcs_intaudiotranscoding_channel_convertor;
-    else
-        goto m4mcs_intaudiotranscoding_encode_and_write;
-
-    /* _________________ */
-    /*|                 |*/
-    /*| STEREO <-> MONO |*/
-    /*|_________________|*/
-
-m4mcs_intaudiotranscoding_channel_convertor:
-
-    /* convert the input pcm stream to mono or to stereo */
-    switch( needChannelConversion )
-    {
-        case 1: /* stereo to mono */
-            From2iToMono_16((short *)pChannelConvertorInput,
-                (short *)pChannelConvertorOutput,
-                (short)(uiChannelConvertorNbSamples / 2));
-            break;
-
-        case 2: /* mono to stereo */
-            MonoTo2I_16((short *)pChannelConvertorInput,
-                (short *)pChannelConvertorOutput,
-                (short)uiChannelConvertorNbSamples);
-            break;
-    }
-
-    /* __________________ */
-    /*|                  |*/
-    /*| ENCODE AND WRITE |*/
-    /*|__________________|*/
-
-m4mcs_intaudiotranscoding_encode_and_write:
-
-    /* Check if the encoder in buffer is ready (= full) */
-    if( ( pC->pPosInAudioEncoderBuffer - pC->pAudioEncoderBuffer)
-        < (M4OSA_Int32)pC->audioEncoderGranularity )
-    {
-        goto m4mcs_intaudiotranscoding_end;
-    }
-
-    /* [Mono] or [Stereo interleaved] : all is in one buffer */
-    pEncInBuffer.pTableBuffer[0] = pEncoderInput;
-    pEncInBuffer.pTableBufferSize[0] = pC->audioEncoderGranularity;
-    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-    pEncInBuffer.pTableBufferSize[1] = 0;
-
-    /* Time in ms from data size, because it is PCM16 samples */
-    frameTimeDelta =
-        ( pEncInBuffer.pTableBufferSize[0] * uiChannelConvertorCoeff / 2)
-        / sizeof(short) / pC->pReaderAudioStream->m_nbChannels;
-
-    /**
-    * Prepare the writer AU */
-    err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
-        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intAudioTranscoding(): pWriterDataFcts->pStartAU(Audio) returns 0x%x",
-            err);
-        return err;
-    }
-
-    /*FlB 2009.03.04: apply audio effects if an effect is active*/
-    if( *pActiveEffectNumber >= 0 && *pActiveEffectNumber < pC->nbEffects )
-    {
-        if( pC->pEffects[*pActiveEffectNumber].ExtAudioEffectFct != M4OSA_NULL )
-        {
-            M4MCS_ExternalProgress pProgress;
-            M4OSA_UInt32 tempProgress = 0;
-            pProgress.uiClipTime = (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS;
-
-            pProgress.uiOutputTime = ( pC->WriterAudioAU.CTS * 1000)
-                / pC->WriterAudioStream.timeScale;
-            tempProgress = ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
-                - pC->pEffects[*pActiveEffectNumber].uiStartTime
-                - pC->uiBeginCutTime) * 1000;
-            pProgress.uiProgress =
-                (M4OSA_UInt32)(tempProgress / (M4OSA_UInt32)pC->pEffects[
-                    *pActiveEffectNumber].uiDuration);
-
-                    err = pC->pEffects[*pActiveEffectNumber].ExtAudioEffectFct(
-                        pC->pEffects[*pActiveEffectNumber].pExtAudioEffectFctCtxt,
-                        (M4OSA_Int16 *)pEncInBuffer.pTableBuffer[0],
-                        pEncInBuffer.pTableBufferSize[0], &pProgress);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4MCS_intAudioTranscoding(): ExtAudioEffectFct() returns 0x%x",
-                            err);
-                        return err;
-                    }
-        }
-    }
-
-    /**
-    * Prepare output buffer */
-    pEncOutBuffer.pTableBuffer[0] =
-        (M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress;
-    pEncOutBuffer.pTableBufferSize[0] = 0;
-
-#ifdef MCS_DUMP_PCM_TO_FILE
-
-    fwrite(pEncInBuffer.pTableBuffer[0], pEncInBuffer.pTableBufferSize[0], 1,
-        file_pcm_encoder);
-
-#endif
-
-    if( M4OSA_FALSE == pC->b_isRawWriter )
-    {
-        /* This allow to write PCM data to file and to encode AMR data,
-         when output file is not RAW */
-        if( pC->pOutputPCMfile != M4OSA_NULL )
-        {
-            pC->pOsaFileWritPtr->writeData(pC->pOutputPCMfile,
-                pEncInBuffer.pTableBuffer[0], pEncInBuffer.pTableBufferSize[0]);
-        }
-
-        /**
-        * Encode the PCM audio */
-        err = pC->pAudioEncoderGlobalFcts->pFctStep(pC->pAudioEncCtxt,
-            &pEncInBuffer, &pEncOutBuffer);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intAudioTranscoding(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                err);
-            return err;
-        }
-
-        /* update data consumed into encoder buffer in after encoding (empty) */
-        pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
-
-        /**
-        * Set AU cts and size */
-        pC->WriterAudioAU.size =
-            pEncOutBuffer.
-            pTableBufferSize[0]; /**< Get the size of encoded data */
-        pC->WriterAudioAU.CTS += frameTimeDelta;
-
-        /**
-        * Update duration of the encoded AU */
-        pC->m_audioAUDuration =
-            ( frameTimeDelta * 1000) / pC->WriterAudioStream.timeScale;
-
-        /**
-        * Write the encoded AU to the output file */
-        pC->uiAudioAUCount++;
-        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
-            M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intAudioTranscoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
-                err);
-            return err;
-        }
-    }
-    else
-    {
-        /* update data consumed into encoder buffer in after encoding (empty) */
-        pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
-
-        pC->WriterAudioAU.dataAddress =
-            (M4OSA_MemAddr32)
-            pEncoderInput; /* will be converted back to u8* in file write */
-        pC->WriterAudioAU.size = pC->audioEncoderGranularity;
-        pC->uiAudioAUCount++;
-
-        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
-            M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intAudioTranscoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /* _______________ */
-    /*|               |*/
-    /*| ONE PASS DONE |*/
-    /*|_______________|*/
-
-m4mcs_intaudiotranscoding_end:
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intAudioTranscoding(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intReallocTemporaryAU(M4OSA_MemAddr8* addr, M4OSA_UInt32 newSize)
- * Used only in case of 3GP constant memory reader, to be able to realloc temporary AU
- * because max AU size can be reevaluated during reading
- * @return   M4NO_ERROR:         No error
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intReallocTemporaryAU( M4OSA_MemAddr8 *addr,
-                                             M4OSA_UInt32 newSize )
-{
-    if( *addr != M4OSA_NULL )
-    {
-        free(*addr);
-        *addr = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(newSize, M4MCS,
-            (M4OSA_Char *)"Reallocation of temporary AU buffer");
-
-        if( *addr == M4OSA_NULL )
-        {
-            return M4ERR_ALLOC;
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intVideoNullEncoding(M4MCS_InternalContext* pC)
- * @author   Alexis Vapillon (NXP Software Vision)
- * @return   M4NO_ERROR:         No error
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intVideoNullEncoding( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    /* Duration of the AU (find the next AU duration
-     * to obtain a more precise video end cut)
-     */
-    M4OSA_UInt32 videoAUDuration = 0;
-
-    M4OSA_MemAddr8 WritebufferAdd = M4OSA_NULL;
-    M4OSA_Int32 lastdecodedCTS = 0;
-    M4_AccessUnit lReaderVideoAU; /**< Read video access unit */
-
-    if( pC->novideo )
-        return M4NO_ERROR;
-
-    /* H.264 Trimming */
-    if( ( ( pC->bH264Trim == M4OSA_TRUE)
-        && (pC->uiVideoAUCount < pC->m_pInstance->clip_sps.num_ref_frames)
-        && (pC->uiBeginCutTime > 0))
-        || (( pC->uiVideoAUCount == 0) && (pC->uiBeginCutTime > 0)) )
-    {
-        err = M4MCS_intVideoTranscoding(pC);
-        return err;
-    }
-
-
-    if((pC->bLastDecodedFrameCTS == M4OSA_FALSE) && (pC->uiBeginCutTime > 0))
-    {
-        // StageFright encoder does prefetch, the one frame we requested will not be written until
-        // the encoder is closed, so do it now rather than in MCS_close
-        if( ( M4NO_ERROR != err)
-            || (M4MCS_kEncoderRunning != pC->encoderState) )
-        {
-            M4OSA_TRACE1_2(
-                "!!! M4MCS_intVideoNullEncoding ERROR : M4MCS_intVideoTranscoding "
-                "returns 0x%X w/ encState=%d", err, pC->encoderState);
-
-            return err;
-        }
-
-        /* Stop and close the encoder now to flush the frame (prefetch) */
-        if( pC->pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
-        {
-            err = pC->pVideoEncoderGlobalFcts->pFctStop(pC->pViEncCtxt);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "!!! M4MCS_intVideoNullEncoding ERROR : encoder stop returns 0x%X",
-                    err);
-                return err;
-            }
-        }
-        pC->encoderState = M4MCS_kEncoderStopped;
-        err = pC->pVideoEncoderGlobalFcts->pFctClose(pC->pViEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "!!! M4MCS_intVideoNullEncoding ERROR : encoder close returns 0x%X",
-                err);
-            return err;
-        }
-        pC->encoderState = M4MCS_kEncoderClosed;
-    }
-
-
-    if ((pC->EncodingVideoFormat == M4ENCODER_kNULL)
-        && (pC->bLastDecodedFrameCTS == M4OSA_FALSE)
-        && (pC->uiBeginCutTime > 0)) {
-
-        pC->bLastDecodedFrameCTS = M4OSA_TRUE;
-        err = pC->m_pVideoDecoder->m_pFctGetOption(pC->pViDecCtxt,
-            M4DECODER_kOptionID_AVCLastDecodedFrameCTS, &lastdecodedCTS);
-
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding: m_pVideoDecoder->m_pFctGetOption returns 0x%x!",
-                err);
-            return err;
-        }
-        /* Do not need video decoder any more, need to destroy it. Otherwise it
-         * will call reader function which will cause frame lost during triming,
-         * since the 3gp reader is shared between MCS and decoder.*/
-        if (M4OSA_NULL != pC->pViDecCtxt) {
-            err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
-            pC->pViDecCtxt = M4OSA_NULL;
-
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intVideoNullEncoding: decoder pFctDestroy returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-
-        err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream, &lastdecodedCTS);
-
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding: m_pFctJump(V) returns 0x%x!",
-                err);
-            return err;
-        }
-
-
-        /* Initializes an access Unit */
-
-        err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream, &lReaderVideoAU);
-
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding:m_pReader->m_pFctFillAuStruct(video)\
-                returns 0x%x", err);
-            return err;
-        }
-
-        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream, &lReaderVideoAU);
-
-        if (M4WAR_NO_MORE_AU == err) {
-            M4OSA_TRACE2_0(
-                "M4MCS_intVideoNullEncoding():\
-                 m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
-            /* The audio transcoding is finished */
-            pC->VideoState = M4MCS_kStreamState_FINISHED;
-            return err;
-        }
-        else if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding():\
-                 m_pReaderDataIt->m_pFctGetNextAu(video) returns 0x%x",
-                err);
-            return err;
-        }
-
-        M4OSA_TRACE1_1(
-            "### [TS_CHECK] M4MCS_intVideoNullEncoding  video AU CTS: %d ",
-            lReaderVideoAU.m_CTS);
-
-
-    }
-
-
-    pC->bLastDecodedFrameCTS = M4OSA_TRUE;
-
-
-    /* Find the next AU duration to obtain a more precise video end cut*/
-    /**
-    * Initializes a new AU if needed */
-
-    if (pC->ReaderVideoAU1.m_structSize == 0) {
-        /**
-        * Initializes an access Unit */
-        err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream,
-            &pC->ReaderVideoAU1);
-
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1(
-                "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
-                err);
-            return err;
-        }
-
-        pC->m_pDataVideoAddress1 =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderVideoAU1.m_maxsize, M4MCS,
-            (M4OSA_Char *)"Temporary video AU1 buffer");
-
-        if (pC->m_pDataVideoAddress1 == M4OSA_NULL) {
-            M4OSA_TRACE1_0("M4MCS_intVideoNullEncoding(): allocation error");
-            return M4ERR_ALLOC;
-        }
-
-        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream,
-            &pC->ReaderVideoAU1);
-
-        if( M4WAR_NO_MORE_AU == err )
-        {
-            M4OSA_TRACE2_0(
-                "M4MCS_intVideoNullEncoding():\
-                 m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
-            /* The audio transcoding is finished */
-            pC->VideoState = M4MCS_kStreamState_FINISHED;
-            return err;
-        }
-        else if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding(): m_pReaderDataIt->m_pFctGetNextAu(video)\
-                 returns 0x%x", err);
-            return err;
-        }
-
-        if( pC->ReaderVideoAU1.m_maxsize
-            > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
-        {
-            /* Constant memory reader case, we need to reallocate the temporary buffers */
-            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU1.m_maxsize);
-            /* pC->m_pDataVideoAddress1
-            and pC->m_pDataVideoAddress2 must be reallocated at the same time */
-            /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize take maximum value.
-             Then the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
-             m_basicProperties.m_maxAUSize)" is never true */
-            /* and the size of the second buffer is never changed. */
-            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU1.m_maxsize);
-            /* pC->m_pDataVideoAddress1 and
-            pC->m_pDataVideoAddress2 must be reallocated at the same time */
-            /* Update stream properties */
-            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
-                pC->ReaderVideoAU1.m_maxsize;
-        }
-        memcpy((void *)pC->m_pDataVideoAddress1,
-            (void *)pC->ReaderVideoAU1.m_dataAddress,
-            pC->ReaderVideoAU1.m_size);
-    }
-
-    if( pC->ReaderVideoAU2.m_structSize == 0 )
-    {
-        /**
-        * Initializes an access Unit */
-        err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream,
-            &pC->ReaderVideoAU2);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
-                err);
-            return err;
-        }
-        pC->m_pDataVideoAddress2 =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderVideoAU2.m_maxsize, M4MCS,
-            (M4OSA_Char *)"Temporary video AU buffer");
-
-        if( pC->m_pDataVideoAddress2 == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("M4MCS_intVideoNullEncoding(): allocation error");
-            return M4ERR_ALLOC;
-        }
-    }
-    /**
-    * Read the next video AU in the input file */
-    if( pC->ReaderVideoAU2.m_CTS > pC->ReaderVideoAU1.m_CTS )
-    {
-        memcpy((void *) &pC->ReaderVideoAU,
-            (void *) &pC->ReaderVideoAU2, sizeof(M4_AccessUnit));
-        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream,
-            &pC->ReaderVideoAU1);
-
-        if( pC->ReaderVideoAU1.m_maxsize
-            > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
-        {
-            /* Constant memory reader case, we need to reallocate the temporary buffers */
-            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU1.m_maxsize);
-            /* pC->m_pDataVideoAddress1 and
-             pC->m_pDataVideoAddress2 must be reallocated at the same time */
-            /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize take maximum value.
-             Then the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
-             m_basicProperties.m_maxAUSize)" is never true */
-            /* and the size of the second buffer is never changed. */
-            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU1.m_maxsize);
-            /* pC->m_pDataVideoAddress1 and
-            pC->m_pDataVideoAddress2 must be reallocated at the same time */
-            /* Update stream properties */
-            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
-                pC->ReaderVideoAU1.m_maxsize;
-        }
-        memcpy((void *)pC->m_pDataVideoAddress1,
-            (void *)pC->ReaderVideoAU1.m_dataAddress,
-            pC->ReaderVideoAU1.m_size);
-        videoAUDuration = pC->ReaderVideoAU1.m_CTS - pC->ReaderVideoAU2.m_CTS;
-        pC->ReaderVideoAU.m_dataAddress = pC->m_pDataVideoAddress2;
-    }
-    else
-    {
-        memcpy((void *) &pC->ReaderVideoAU,
-            (void *) &pC->ReaderVideoAU1, sizeof(M4_AccessUnit));
-        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
-            (M4_StreamHandler *)pC->pReaderVideoStream,
-            &pC->ReaderVideoAU2);
-
-        if( pC->ReaderVideoAU2.m_maxsize
-            > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
-        {
-            /* Constant memory reader case, we need to reallocate the temporary buffers */
-            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU2.m_maxsize);
-            /* pC->m_pDataVideoAddress1 and
-             pC->m_pDataVideoAddress2 must be reallocated at the same time */
-            /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize take maximum value.
-             Then the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
-             m_basicProperties.m_maxAUSize)" is never true */
-            /* and the size of the second buffer is never changed. */
-            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
-                *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU2.m_maxsize);
-            /* pC->m_pDataVideoAddress1 and
-            pC->m_pDataVideoAddress2 must be reallocated at the same time */
-            /* Update stream properties */
-            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
-                pC->ReaderVideoAU2.m_maxsize;
-        }
-        memcpy((void *)pC->m_pDataVideoAddress2,
-            (void *)pC->ReaderVideoAU2.m_dataAddress,
-            pC->ReaderVideoAU2.m_size);
-        videoAUDuration = pC->ReaderVideoAU2.m_CTS - pC->ReaderVideoAU1.m_CTS;
-        pC->ReaderVideoAU.m_dataAddress = pC->m_pDataVideoAddress1;
-    }
-
-    if( M4WAR_NO_MORE_AU == err )
-    {
-        M4OSA_TRACE2_0(
-            "M4MCS_intVideoNullEncoding():\
-             m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
-        /* The video transcoding is finished */
-        pC->VideoState = M4MCS_kStreamState_FINISHED;
-        return err;
-    }
-    else if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intVideoNullEncoding(): m_pReaderDataIt->m_pFctGetNextAu(Video) returns 0x%x",
-            err);
-        return err;
-    }
-    else
-    {
-        /**
-        * Prepare the writer AU */
-        err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
-            M4MCS_WRITER_VIDEO_STREAM_ID, &pC->WriterVideoAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding(): pWriterDataFcts->pStartAU(Video) returns 0x%x",
-                err);
-            return err;
-        }
-            /**
-            * Copy video data from reader AU to writer AU */
-            M4OSA_TRACE3_1(
-                "M4MCS_intVideoNullEncoding(): Copying video AU: size=%d",
-                pC->ReaderVideoAU.m_size);
-            /* + CRLV6775 -H.264 Trimming */
-            if( M4OSA_TRUE == pC->bH264Trim )
-            {
-                if( pC->H264MCSTempBufferSize
-                    < (pC->ReaderVideoAU.m_size + 2048) )
-                {
-                    pC->H264MCSTempBufferSize =
-                        (pC->ReaderVideoAU.m_size + 2048);
-
-                    if( pC->H264MCSTempBuffer != M4OSA_NULL )
-                    {
-                        free(pC->H264MCSTempBuffer);
-                    }
-                    pC->H264MCSTempBuffer =
-                        (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(pC->H264MCSTempBufferSize,
-                        M4MCS, (M4OSA_Char *)"pC->H264MCSTempBuffer");
-
-                    if( pC->H264MCSTempBuffer == M4OSA_NULL )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4MCS_intVideoNullEncoding(): allocation error");
-                        return M4ERR_ALLOC;
-                    }
-                }
-
-                pC->H264MCSTempBufferDataSize = pC->H264MCSTempBufferSize;
-
-                err = H264MCS_ProcessNALU(pC->m_pInstance,
-                    (M4OSA_UInt8 *)pC->ReaderVideoAU.m_dataAddress,
-                    pC->ReaderVideoAU.m_size, pC->H264MCSTempBuffer,
-                    (M4OSA_Int32 *)&pC->H264MCSTempBufferDataSize);
-
-                if( pC->m_pInstance->is_done == 1 )
-                {
-                    M4MCS_convetFromByteStreamtoNALStream(
-                        (M4OSA_UInt8 *)pC->ReaderVideoAU.m_dataAddress ,
-                        pC->ReaderVideoAU.m_size);
-
-                    memcpy((void *)pC->WriterVideoAU.dataAddress,
-                        (void *)(pC->ReaderVideoAU.m_dataAddress + 4),
-                        pC->ReaderVideoAU.m_size - 4);
-                    pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size - 4;
-                    WritebufferAdd =
-                        (M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress;
-                }
-                else
-                {
-                    memcpy((void *)pC->WriterVideoAU.dataAddress,
-                        (void *)(pC->H264MCSTempBuffer + 4),
-                        pC->H264MCSTempBufferDataSize - 4);
-                    pC->WriterVideoAU.size = pC->H264MCSTempBufferDataSize - 4;
-                    WritebufferAdd =
-                        (M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress;
-                }
-            }
-            /* H.264 Trimming */
-            else
-            {
-                memcpy((void *)pC->WriterVideoAU.dataAddress,
-                    (void *)pC->ReaderVideoAU.m_dataAddress,
-                    pC->ReaderVideoAU.m_size);
-                pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size;
-            }
-            /**
-            * Convert CTS unit from milliseconds to timescale */
-            pC->WriterVideoAU.CTS =
-                (M4OSA_Time)((( pC->ReaderVideoAU.m_CTS - pC->dViDecStartingCts)
-                * (pC->WriterVideoStream.timeScale / 1000.0)));
-            pC->WriterVideoAU.nbFrag = 0;
-            pC->WriterVideoAU.attribute = pC->ReaderVideoAU.m_attribute;
-
-            M4OSA_TRACE3_1("M4MCS_intVideoNullEncoding(): video AU: CTS=%d ms",
-                pC->WriterVideoAU.CTS);
-
-        /**
-        * Write it to the output file */
-        pC->uiVideoAUCount++;
-        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
-            M4MCS_WRITER_VIDEO_STREAM_ID, &pC->WriterVideoAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_intVideoNullEncoding(): pWriterDataFcts->pProcessAU(Video) returns 0x%x",
-                err);
-            return err;
-        }
-        /* + CRLV6775 -H.264 Trimming */
-        if( M4OSA_TRUE == pC->bH264Trim )
-        {
-            if( pC->m_pInstance->is_done == 1 )
-            {
-                memcpy((void *)(WritebufferAdd - 4),
-                    (void *)(pC->ReaderVideoAU.m_dataAddress), 4);
-            }
-            else
-            {
-                memcpy((void *)(WritebufferAdd - 4),
-                    (void *)(pC->H264MCSTempBuffer), 4);
-            }
-        } /* H.264 Trimming */
-    }
-    /**
-    * Check for end cut. */
-    /* Bug fix 11/12/2008: We absolutely want to have less or same video duration ->
-    (2*videoAUDuration) to have a more precise end cut*/
-    if( pC->ReaderVideoAU.m_CTS + (2 *videoAUDuration) > pC->uiEndCutTime )
-    {
-        pC->VideoState = M4MCS_kStreamState_FINISHED;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_intVideoNullEncoding(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intVideoTranscoding(M4MCS_InternalContext* pC)
- * @author   Alexis Vapillon (NXP Software Vision)
- * @return   M4NO_ERROR:         No error
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intVideoTranscoding( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4_MediaTime mtTranscodedTime = 0.0;
-    M4ENCODER_FrameMode FrameMode;
-    M4OSA_Int32 derive = 0;
-
-    /**
-    * Get video CTS to decode */
-    mtTranscodedTime = pC->dViDecCurrentCts;
-    FrameMode = M4ENCODER_kNormalFrame;
-
-    /**
-    * Decode video */
-    M4OSA_TRACE3_1(
-        "M4MCS_intVideoTranscoding(): Calling m_pVideoDecoder->m_pFctDecode(%.2f)",
-        mtTranscodedTime);
-    pC->isRenderDup = M4OSA_FALSE;
-    err = pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &mtTranscodedTime,
-        M4OSA_FALSE, 0);
-
-    if( M4WAR_NO_MORE_AU == err )
-    {
-        FrameMode =
-            M4ENCODER_kLastFrame; /**< We will give this value to the encoder to
-            ask for the end of the encoding */
-        pC->VideoState = M4MCS_kStreamState_FINISHED;
-    }
-    else if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
-    {
-        M4OSA_TRACE2_0("Decoding output the same frame as before 3");
-        pC->isRenderDup = M4OSA_TRUE;
-    }
-    else if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4MCS_intVideoTranscoding(): m_pVideoDecoder->m_pFctDecode returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Check for end cut.
-    * We must check here if the end cut is reached, because in that case we must
-    * call the last encode step (-> bLastFrame set to true) */
-    if( ( pC->dViDecCurrentCts + pC->dCtsIncrement ) >= (pC->uiEndCutTime
-        + M4MCS_ABS(pC->dViDecStartingCts - pC->uiBeginCutTime)) )
-    {
-        FrameMode =
-            M4ENCODER_kLastFrame; /**< We will give this value to the encoder to
-            ask for the end of the encoding */
-        pC->VideoState = M4MCS_kStreamState_FINISHED;
-        derive = (M4OSA_Int32)(( pC->dViDecCurrentCts + pC->dCtsIncrement + 0.5)
-            - (pC->uiEndCutTime
-            + M4MCS_ABS(pC->dViDecStartingCts - pC->uiBeginCutTime)));
-    }
-
-    /* Update starting CTS to have a more precise value (
-    the begin cut is not a real CTS)*/
-    if( pC->uiVideoAUCount == 0 )
-    {
-        pC->dViDecStartingCts = mtTranscodedTime;
-        pC->dViDecCurrentCts = pC->dViDecStartingCts;
-    }
-
-    /**
-    * Encode video */
-    M4OSA_TRACE3_1(
-        "M4MCS_intVideoTranscoding(): Calling pVideoEncoderGlobalFcts->pFctEncode with videoCts\
-         = %.2f",pC->ReaderVideoAU.m_CTS);
-    pC->uiVideoAUCount++;
-    /* update the given duration (the begin cut is not a real CTS)*/
-    err = pC->pVideoEncoderGlobalFcts->pFctEncode(pC->pViEncCtxt, M4OSA_NULL,
-        (pC->dViDecCurrentCts - pC->dViDecStartingCts - (derive >> 1)),
-        FrameMode);
-
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intGetInputClipProperties(M4MCS_InternalContext* pContext)
- * @author   Dounya Manai (NXP Software Vision)
- * @brief    Retrieve the properties of the audio and video streams from the input file.
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR:         No error
- * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intGetInputClipProperties( M4MCS_InternalContext *pC )
-{
-    M4DECODER_MPEG4_DecoderConfigInfo DecConfInfo;
-    M4READER_3GP_H263Properties H263prop;
-    M4OSA_ERR err;
-    M4OSA_UInt32 videoBitrate;
-    M4DECODER_VideoSize videoSize;
-    M4_AACType iAacType = 0;
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2(M4OSA_NULL == pC, M4ERR_PARAMETER,
-        "M4MCS_intGetInputClipProperties: pC is M4OSA_NULL");
-
-    /**
-    * Reset common characteristics */
-    pC->InputFileProperties.bAnalysed = M4OSA_FALSE;
-    pC->InputFileProperties.FileType = 0;
-    pC->InputFileProperties.Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
-    pC->InputFileProperties.Version[1] = M4VIDEOEDITING_VERSION_MINOR;
-    pC->InputFileProperties.Version[2] = M4VIDEOEDITING_VERSION_REVISION;
-    pC->InputFileProperties.uiClipDuration = 0;
-
-    memset((void *) &pC->InputFileProperties.ftyp,
-        0, sizeof(M4VIDEOEDITING_FtypBox));
-
-    /**
-    * Reset video characteristics */
-    pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
-    pC->InputFileProperties.uiClipVideoDuration = 0;
-    pC->InputFileProperties.uiVideoBitrate = 0;
-    pC->InputFileProperties.uiVideoMaxAuSize = 0;
-    pC->InputFileProperties.uiVideoWidth = 0;
-    pC->InputFileProperties.uiVideoHeight = 0;
-    pC->InputFileProperties.uiVideoTimeScale = 0;
-    pC->InputFileProperties.fAverageFrameRate = 0.0;
-    pC->InputFileProperties.uiVideoLevel =
-        M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-    pC->InputFileProperties.uiVideoProfile =
-        M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-    pC->InputFileProperties.bMPEG4dataPartition = M4OSA_FALSE;
-    pC->InputFileProperties.bMPEG4rvlc = M4OSA_FALSE;
-    pC->InputFileProperties.bMPEG4resynchMarker = M4OSA_FALSE;
-
-    /**
-    * Reset audio characteristics */
-    pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
-    pC->InputFileProperties.uiClipAudioDuration = 0;
-    pC->InputFileProperties.uiAudioBitrate = 0;
-    pC->InputFileProperties.uiAudioMaxAuSize = 0;
-    pC->InputFileProperties.uiNbChannels = 0;
-    pC->InputFileProperties.uiSamplingFrequency = 0;
-    pC->InputFileProperties.uiExtendedSamplingFrequency = 0;
-    pC->InputFileProperties.uiDecodedPcmSize = 0;
-
-    /* Reset compatibility chart (not used in MCS) */
-    pC->InputFileProperties.bVideoIsEditable = M4OSA_FALSE;
-    pC->InputFileProperties.bAudioIsEditable = M4OSA_FALSE;
-    pC->InputFileProperties.bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
-    pC->InputFileProperties.bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
-
-    /**
-    * Video stream properties */
-    if( M4OSA_NULL != pC->pReaderVideoStream )
-    {
-        switch( pC->pReaderVideoStream->m_basicProperties.m_streamType )
-        {
-            case M4DA_StreamTypeVideoMpeg4:
-                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kMPEG4;
-                break;
-
-            case M4DA_StreamTypeVideoH263:
-                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kH263;
-                break;
-
-            case M4DA_StreamTypeVideoMpeg4Avc:
-                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kH264;
-                break;
-
-            case M4DA_StreamTypeUnknown:
-            default:
-                pC->InputFileProperties.VideoStreamType =
-                    M4VIDEOEDITING_kUnsupportedVideo;
-                break;
-        }
-
-        /* if bitrate not available retrieve an estimation of the overall bitrate */
-        pC->InputFileProperties.uiVideoBitrate =
-            pC->pReaderVideoStream->m_basicProperties.m_averageBitRate;
-
-        if( 0 == pC->InputFileProperties.uiVideoBitrate )
-        {
-            pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
-                M4READER_kOptionID_Bitrate, &videoBitrate);
-
-            if( M4OSA_NULL != pC->pReaderAudioStream )
-            {
-                /* we get the overall bitrate, substract the audio bitrate if any */
-                videoBitrate -=
-                    pC->pReaderAudioStream->m_basicProperties.m_averageBitRate;
-            }
-            pC->InputFileProperties.uiVideoBitrate = videoBitrate;
-        }
-
-        /**
-        * Retrieve the Profile & Level */
-        if( ( M4VIDEOEDITING_kH263 != pC->InputFileProperties.VideoStreamType)
-            && (M4VIDEOEDITING_kH264
-            != pC->InputFileProperties.VideoStreamType) )
-        {
-            /* Use the DSI parsing function from the external video shell decoder.
-            See the comments in M4VSS3GPP_ClipAnalysis.c, it's pretty much the
-            same issue. */
-
-            err = M4DECODER_EXTERNAL_ParseVideoDSI(pC->pReaderVideoStream->
-                m_basicProperties.m_pDecoderSpecificInfo,
-                pC->pReaderVideoStream->
-                m_basicProperties.m_decoderSpecificInfoSize,
-                &DecConfInfo, &videoSize);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intGetInputClipProperties():\
-                     M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X",
-                    err);
-                return err;
-            }
-
-            pC->pReaderVideoStream->m_videoWidth = videoSize.m_uiWidth;
-            pC->pReaderVideoStream->m_videoHeight = videoSize.m_uiHeight;
-            pC->InputFileProperties.uiVideoTimeScale = DecConfInfo.uiTimeScale;
-            pC->InputFileProperties.bMPEG4dataPartition =
-                DecConfInfo.bDataPartition;
-            pC->InputFileProperties.bMPEG4rvlc = DecConfInfo.bUseOfRVLC;
-            pC->InputFileProperties.bMPEG4resynchMarker =
-                DecConfInfo.uiUseOfResynchMarker;
-
-            err = getMPEG4ProfileAndLevel(DecConfInfo.uiProfile,
-                        &(pC->InputFileProperties.uiVideoProfile),
-                        &(pC->InputFileProperties.uiVideoLevel));
-            if ( M4NO_ERROR != err ) {
-                M4OSA_TRACE1_1("M4MCS_intGetInputClipProperties():\
-                    getMPEG4ProfileAndLevel returns 0x%08X", err);
-                return err;
-            }
-        }
-        else if( M4VIDEOEDITING_kH263 ==
-            pC->InputFileProperties.VideoStreamType ) {
-
-            err = getH263ProfileAndLevel(pC->pReaderVideoStream->
-                        m_basicProperties.m_pDecoderSpecificInfo,
-                        pC->pReaderVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
-                        &(pC->InputFileProperties.uiVideoProfile),
-                        &(pC->InputFileProperties.uiVideoLevel));
-            if ( M4NO_ERROR != err ) {
-                M4OSA_TRACE1_1("M4MCS_intGetInputClipProperties():\
-                    getH263ProfileAndLevel returns 0x%08X", err);
-                return err;
-            }
-            /* For h263 set default timescale : 30000:1001 */
-            pC->InputFileProperties.uiVideoTimeScale = 30000;
-        }
-        else if ( M4VIDEOEDITING_kH264 ==
-            pC->InputFileProperties.VideoStreamType ) {
-
-            pC->InputFileProperties.uiVideoTimeScale = 30000;
-            err = getAVCProfileAndLevel(pC->pReaderVideoStream->
-                        m_basicProperties.m_pDecoderSpecificInfo,
-                        pC->pReaderVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
-                        &(pC->InputFileProperties.uiVideoProfile),
-                        &(pC->InputFileProperties.uiVideoLevel));
-            if ( M4NO_ERROR != err ) {
-                M4OSA_TRACE1_1("M4MCS_intGetInputClipProperties():\
-                    getAVCProfileAndLevel returns 0x%08X", err);
-                return err;
-            }
-        }
-
-        /* Here because width x height is correct only after dsi parsing
-        (done in create decoder) */
-        pC->InputFileProperties.uiVideoHeight =
-            pC->pReaderVideoStream->m_videoHeight;
-        pC->InputFileProperties.uiVideoWidth =
-            pC->pReaderVideoStream->m_videoWidth;
-        pC->InputFileProperties.uiClipVideoDuration =
-            (M4OSA_UInt32)pC->pReaderVideoStream->m_basicProperties.m_duration;
-        pC->InputFileProperties.fAverageFrameRate =
-            pC->pReaderVideoStream->m_averageFrameRate;
-        pC->InputFileProperties.uiVideoMaxAuSize =
-            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize;
-        pC->InputFileProperties.videoRotationDegrees =
-            pC->pReaderVideoStream->videoRotationDegrees;
-    }
-    else
-    {
-        if( M4OSA_TRUE == pC->bUnsupportedVideoFound )
-        {
-            pC->InputFileProperties.VideoStreamType =
-                M4VIDEOEDITING_kUnsupportedVideo;
-        }
-        else
-        {
-            pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
-        }
-    }
-
-    /**
-    * Audio stream properties */
-    if( M4OSA_NULL != pC->pReaderAudioStream )
-    {
-        switch( pC->pReaderAudioStream->m_basicProperties.m_streamType )
-        {
-            case M4DA_StreamTypeAudioAmrNarrowBand:
-                pC->InputFileProperties.AudioStreamType =
-                    M4VIDEOEDITING_kAMR_NB;
-                break;
-
-            case M4DA_StreamTypeAudioAac:
-                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kAAC;
-                break;
-
-            case M4DA_StreamTypeAudioMp3:
-                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kMP3;
-                break;
-
-            case M4DA_StreamTypeAudioEvrc:
-                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kEVRC;
-                break;
-
-            case M4DA_StreamTypeUnknown:
-            default:
-                pC->InputFileProperties.AudioStreamType =
-                    M4VIDEOEDITING_kUnsupportedAudio;
-                break;
-        }
-
-        if( ( M4OSA_NULL != pC->m_pAudioDecoder)
-            && (M4OSA_NULL == pC->pAudioDecCtxt) )
-        {
-            M4OSA_TRACE3_1(
-                "M4MCS_intGetInputClipProperties: calling CreateAudioDecoder, userData= 0x%x",
-                pC->m_pCurrentAudioDecoderUserData);
-
-            if( M4OSA_FALSE == pC->bExtOMXAudDecoder ) {
-                err = M4MCS_intCheckAndGetCodecProperties(pC);
-            }
-            else
-            {
-                err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
-                    &pC->pAudioDecCtxt, pC->pReaderAudioStream,
-                    pC->m_pCurrentAudioDecoderUserData);
-
-                if( M4NO_ERROR == err )
-                {
-                    /* AAC properties*/
-                    //get from Reader; temporary, till Audio decoder shell API available to
-                    //get the AAC properties
-                    pC->AacProperties.aNumChan =
-                        pC->pReaderAudioStream->m_nbChannels;
-                    pC->AacProperties.aSampFreq =
-                        pC->pReaderAudioStream->m_samplingFrequency;
-
-                    err = pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(
-                        pC->pAudioDecCtxt, M4AD_kOptionID_StreamType,
-                        (M4OSA_DataOption) &iAacType);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4MCS_intGetInputClipProperties:\
-                             m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x",
-                            err);
-                        iAacType = M4_kAAC; //set to default
-                        err = M4NO_ERROR;
-                    }
-                    else
-                    {
-                        M4OSA_TRACE3_1(
-                            "M4MCS_intGetInputClipProperties:\
-                             m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
-                            iAacType);
-                    }
-
-                    switch( iAacType )
-                    {
-                        case M4_kAAC:
-                            pC->AacProperties.aSBRPresent = 0;
-                            pC->AacProperties.aPSPresent = 0;
-                            break;
-
-                        case M4_kAACplus:
-                            pC->AacProperties.aSBRPresent = 1;
-                            pC->AacProperties.aPSPresent = 0;
-                            pC->AacProperties.aExtensionSampFreq =
-                                pC->pReaderAudioStream->
-                                m_samplingFrequency; //TODO
-                            break;
-
-                        case M4_keAACplus:
-                            pC->AacProperties.aSBRPresent = 1;
-                            pC->AacProperties.aPSPresent = 1;
-                            pC->AacProperties.aExtensionSampFreq =
-                                pC->pReaderAudioStream->
-                                m_samplingFrequency; //TODO
-                            break;
-                          case M4_kUnknown:
-                          break;
-                          default:
-                          break;
-                        }
-                        M4OSA_TRACE3_2(
-                            "M4MCS_intGetInputClipProperties: AAC NBChans=%d, SamplFreq=%d",
-                            pC->AacProperties.aNumChan,
-                            pC->AacProperties.aSampFreq);
-                }
-            }
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4MCS_intGetInputClipProperties:\
-                     m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-
-        //EVRC
-        if( pC->pReaderAudioStream->m_basicProperties.m_streamType
-            == M4DA_StreamTypeAudioEvrc )
-        {
-            /* decoder not implemented yet, provide some default values for the null encoding */
-            pC->pReaderAudioStream->m_nbChannels = 1;
-            pC->pReaderAudioStream->m_samplingFrequency = 8000;
-        }
-
-        /**
-        * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps according
-         the GetProperties function */
-        if( 0 == pC->pReaderAudioStream->m_basicProperties.m_averageBitRate )
-        {
-            if( M4VIDEOEDITING_kAMR_NB
-                == pC->InputFileProperties.AudioStreamType )
-            {
-                /**
-                * Better returning a guessed 12.2 kbps value than a sure-to-be-false
-                0 kbps value! */
-                pC->InputFileProperties.uiAudioBitrate =
-                    M4VIDEOEDITING_k12_2_KBPS;
-            }
-            else if( M4VIDEOEDITING_kEVRC
-                == pC->InputFileProperties.AudioStreamType )
-            {
-                /**
-                * Better returning a guessed 8.5 kbps value than a sure-to-be-false
-                0 kbps value! */
-                pC->InputFileProperties.uiAudioBitrate =
-                    M4VIDEOEDITING_k9_2_KBPS;
-            }
-            else
-            {
-                M4OSA_UInt32 FileBitrate;
-
-                /* Can happen also for aac, in this case we calculate an approximative */
-                /* value from global bitrate and video bitrate */
-                err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
-                    M4READER_kOptionID_Bitrate,
-                    (M4OSA_DataOption) &FileBitrate);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4MCS_intGetInputClipProperties: M4READER_kOptionID_Bitrate returns 0x%x",
-                        err);
-                    return err;
-                }
-                pC->InputFileProperties.uiAudioBitrate =
-                    FileBitrate
-                    - pC->
-                    InputFileProperties.
-                    uiVideoBitrate /* normally setted to 0, if no video */;
-            }
-        }
-        else
-        {
-            pC->InputFileProperties.uiAudioBitrate =
-                pC->pReaderAudioStream->m_basicProperties.m_averageBitRate;
-        }
-
-        pC->InputFileProperties.uiNbChannels =
-            pC->pReaderAudioStream->m_nbChannels;
-        pC->InputFileProperties.uiSamplingFrequency =
-            pC->pReaderAudioStream->m_samplingFrequency;
-        pC->InputFileProperties.uiClipAudioDuration =
-            (M4OSA_UInt32)pC->pReaderAudioStream->m_basicProperties.m_duration;
-        pC->InputFileProperties.uiAudioMaxAuSize =
-            pC->pReaderAudioStream->m_basicProperties.m_maxAUSize;
-
-        /* Bug: with aac, value is 0 until decoder start() is called */
-        pC->InputFileProperties.uiDecodedPcmSize =
-            pC->pReaderAudioStream->m_byteFrameLength
-            * pC->pReaderAudioStream->m_byteSampleSize
-            * pC->pReaderAudioStream->m_nbChannels;
-
-        /* New aac properties */
-        if( M4DA_StreamTypeAudioAac
-            == pC->pReaderAudioStream->m_basicProperties.m_streamType )
-        {
-            pC->InputFileProperties.uiNbChannels = pC->AacProperties.aNumChan;
-            pC->InputFileProperties.uiSamplingFrequency =
-                pC->AacProperties.aSampFreq;
-
-            if( pC->AacProperties.aSBRPresent )
-            {
-                pC->InputFileProperties.AudioStreamType =
-                    M4VIDEOEDITING_kAACplus;
-                pC->InputFileProperties.uiExtendedSamplingFrequency =
-                    pC->AacProperties.aExtensionSampFreq;
-            }
-
-            if( pC->AacProperties.aPSPresent )
-            {
-                pC->InputFileProperties.AudioStreamType =
-                    M4VIDEOEDITING_keAACplus;
-            }
-        }
-    }
-    else
-    {
-        if( M4OSA_TRUE == pC->bUnsupportedAudioFound )
-        {
-            pC->InputFileProperties.AudioStreamType =
-                M4VIDEOEDITING_kUnsupportedAudio;
-        }
-        else
-        {
-            pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
-        }
-    }
-
-    /* Get 'ftyp' atom */
-    err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
-        M4READER_kOptionID_3gpFtypBox, &pC->InputFileProperties.ftyp);
-
-    /* Analysis is successful */
-    if( pC->InputFileProperties.uiClipVideoDuration
-        > pC->InputFileProperties.uiClipAudioDuration )
-        pC->InputFileProperties.uiClipDuration =
-        pC->InputFileProperties.uiClipVideoDuration;
-    else
-        pC->InputFileProperties.uiClipDuration =
-        pC->InputFileProperties.uiClipAudioDuration;
-
-    pC->InputFileProperties.FileType = pC->InputFileType;
-    pC->InputFileProperties.bAnalysed = M4OSA_TRUE;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB(M4OSA_MemAddr8 pAudioFrame)
- * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
- * @note
- * @param   pCpAudioFrame   (IN) AMRNB frame
- * @return  M4NO_ERROR: No error
- ******************************************************************************
- */
-static M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
-{
-    M4OSA_UInt32 frameSize = 0;
-    M4OSA_UInt32 frameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
-
-    switch( frameType )
-    {
-        case 0:
-            frameSize = 95;
-            break; /*  4750 bps */
-
-        case 1:
-            frameSize = 103;
-            break; /*  5150 bps */
-
-        case 2:
-            frameSize = 118;
-            break; /*  5900 bps */
-
-        case 3:
-            frameSize = 134;
-            break; /*  6700 bps */
-
-        case 4:
-            frameSize = 148;
-            break; /*  7400 bps */
-
-        case 5:
-            frameSize = 159;
-            break; /*  7950 bps */
-
-        case 6:
-            frameSize = 204;
-            break; /* 10200 bps */
-
-        case 7:
-            frameSize = 244;
-            break; /* 12000 bps */
-
-        case 8:
-            frameSize = 39;
-            break; /* SID (Silence) */
-
-        case 15:
-            frameSize = 0;
-            break; /* No data */
-
-        default:
-            M4OSA_TRACE3_0(
-                "M4MCS_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
-            return 0;
-    }
-
-    return (1 + (( frameSize + 7) / 8));
-}
-
-/**
- ******************************************************************************
- * M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC(M4OSA_MemAddr8 pAudioFrame)
- * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
- * @note
- *     0 1 2 3
- *    +-+-+-+-+
- *    |fr type|              RFC 3558
- *    +-+-+-+-+
- *
- * Frame Type: 4 bits
- *    The frame type indicates the type of the corresponding codec data
- *    frame in the RTP packet.
- *
- * For EVRC and SMV codecs, the frame type values and size of the
- * associated codec data frame are described in the table below:
- *
- * Value   Rate      Total codec data frame size (in octets)
- * ---------------------------------------------------------
- *   0     Blank      0    (0 bit)
- *   1     1/8        2    (16 bits)
- *   2     1/4        5    (40 bits; not valid for EVRC)
- *   3     1/2       10    (80 bits)
- *   4     1         22    (171 bits; 5 padded at end with zeros)
- *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
- *
- * @param   pCpAudioFrame   (IN) EVRC frame
- * @return  M4NO_ERROR: No error
- ******************************************************************************
- */
-static M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
-{
-    M4OSA_UInt32 frameSize = 0;
-    M4OSA_UInt32 frameType = ( *pAudioFrame) &0x0F;
-
-    switch( frameType )
-    {
-        case 0:
-            frameSize = 0;
-            break; /*  blank */
-
-        case 1:
-            frameSize = 16;
-            break; /*  1/8 */
-
-        case 2:
-            frameSize = 40;
-            break; /*  1/4 */
-
-        case 3:
-            frameSize = 80;
-            break; /*  1/2 */
-
-        case 4:
-            frameSize = 171;
-            break; /*  1 */
-
-        case 5:
-            frameSize = 0;
-            break; /*  erasure */
-
-        default:
-            M4OSA_TRACE3_0(
-                "M4MCS_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
-            return 0;
-    }
-
-    return (1 + (( frameSize + 7) / 8));
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intCheckMaxFileSize(M4MCS_Context pContext)
- * @brief    Check if max file size is greater enough to encode a file with the
- *           current selected bitrates and duration.
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR
- * @return   M4MCS_ERR_MAXFILESIZE_TOO_SMALL
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intCheckMaxFileSize( M4MCS_Context pContext )
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
-
-    M4OSA_UInt32 duration;
-    M4OSA_UInt32 audiobitrate;
-    M4OSA_UInt32 videobitrate;
-
-    /* free file size : OK */
-    if( pC->uiMaxFileSize == 0 )
-        return M4NO_ERROR;
-
-    /* duration */
-    if( pC->uiEndCutTime == 0 )
-    {
-        duration = pC->InputFileProperties.uiClipDuration - pC->uiBeginCutTime;
-    }
-    else
-    {
-        duration = pC->uiEndCutTime - pC->uiBeginCutTime;
-    }
-
-    /* audio bitrate */
-    if( pC->noaudio )
-    {
-        audiobitrate = 0;
-    }
-    else if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
-    {
-        audiobitrate = pC->InputFileProperties.uiAudioBitrate;
-    }
-    else if( pC->uiAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate )
-    {
-        switch( pC->AudioEncParams.Format )
-        {
-            case M4ENCODER_kAMRNB:
-                audiobitrate = M4VIDEOEDITING_k12_2_KBPS;
-                break;
-                //EVRC
-                //            case M4ENCODER_kEVRC:
-                //                audiobitrate = M4VIDEOEDITING_k9_2_KBPS;
-                //                break;
-
-            default: /* AAC and MP3*/
-                audiobitrate =
-                    (pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
-                    ? M4VIDEOEDITING_k16_KBPS : M4VIDEOEDITING_k32_KBPS;
-                break;
-        }
-    }
-    else
-    {
-        audiobitrate = pC->uiAudioBitrate;
-    }
-
-    /* video bitrate */
-    if( pC->novideo )
-    {
-        videobitrate = 0;
-    }
-    else if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
-    {
-        videobitrate = pC->InputFileProperties.uiVideoBitrate;
-    }
-    else if( pC->uiVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate )
-    {
-        videobitrate = M4VIDEOEDITING_k16_KBPS;
-    }
-    else
-    {
-        videobitrate = pC->uiVideoBitrate;
-    }
-
-    /* max file size */
-    if( (M4OSA_UInt32)pC->uiMaxFileSize
-        < (M4OSA_UInt32)(M4MCS_MOOV_OVER_FILESIZE_RATIO
-        * (audiobitrate + videobitrate) * (duration / 8000.0)) )
-        return M4MCS_ERR_MAXFILESIZE_TOO_SMALL;
-    else
-        return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4VIDEOEDITING_Bitrate M4MCS_intGetNearestBitrate(M4OSA_UInt32 freebitrate, M4OSA_Int8 mode)
- * @brief    Returns the closest bitrate value from the enum list of type M4VIDEOEDITING_Bitrate
- * @param    freebitrate: unsigned int value
- * @param    mode: -1:previous,0:current,1:next
- * @return   bitrate value in enum list M4VIDEOEDITING_Bitrate
- ******************************************************************************
- */
-static M4VIDEOEDITING_Bitrate
-M4MCS_intGetNearestBitrate( M4OSA_Int32 freebitrate, M4OSA_Int8 mode )
-{
-    M4OSA_Int32 bitarray [] =
-    {
-        0, M4VIDEOEDITING_k16_KBPS, M4VIDEOEDITING_k24_KBPS,
-        M4VIDEOEDITING_k32_KBPS, M4VIDEOEDITING_k48_KBPS,
-        M4VIDEOEDITING_k64_KBPS, M4VIDEOEDITING_k96_KBPS,
-        M4VIDEOEDITING_k128_KBPS, M4VIDEOEDITING_k192_KBPS,
-        M4VIDEOEDITING_k256_KBPS, M4VIDEOEDITING_k288_KBPS,
-        M4VIDEOEDITING_k384_KBPS, M4VIDEOEDITING_k512_KBPS,
-        M4VIDEOEDITING_k800_KBPS, M4VIDEOEDITING_k2_MBPS,
-        M4VIDEOEDITING_k5_MBPS,
-        M4VIDEOEDITING_k8_MBPS, /*+ New Encoder bitrates */
-        M4OSA_INT32_MAX
-    };
-
-    const M4OSA_UInt32 nbbitrates = 14;
-    M4OSA_UInt32 i;
-
-    for ( i = 0; freebitrate >= bitarray[i]; i++ );
-
-    switch( mode )
-    {
-        case -1: /* previous */
-            if( i <= 2 )
-                return 0;
-            else
-                return bitarray[i - 2];
-            break;
-
-        case 0: /* current */
-            if( i <= 1 )
-                return 0;
-            else
-                return bitarray[i - 1];
-            break;
-
-        case 1: /* next */
-            if( i >= nbbitrates )
-                return M4OSA_INT32_MAX;
-            else
-                return bitarray[i];
-            break;
-    }
-
-    return 0;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders(M4MCS_InternalContext* pC);
- * @brief    Free all resources allocated by M4MCS_open()
- * @param    pContext            (IN) MCS context
- * @return   M4NO_ERROR:         No error
- ******************************************************************************
- */
-static M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders( M4MCS_InternalContext *pC )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4OSA_TRACE2_1("M4MCS_intCleanUp_ReadersDecoders called with pC=0x%x", pC);
-
-    /**/
-    /* ----- Free video decoder stuff, if needed ----- */
-
-    if( M4OSA_NULL != pC->pViDecCtxt )
-    {
-        err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
-        pC->pViDecCtxt = M4OSA_NULL;
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_cleanUp: m_pVideoDecoder->pFctDestroy returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-    }
-
-    /* ----- Free the audio decoder stuff ----- */
-
-    if( M4OSA_NULL != pC->pAudioDecCtxt )
-    {
-        err = pC->m_pAudioDecoder->m_pFctDestroyAudioDec(pC->pAudioDecCtxt);
-        pC->pAudioDecCtxt = M4OSA_NULL;
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_cleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-    }
-
-    if( M4OSA_NULL != pC->AudioDecBufferOut.m_dataAddress )
-    {
-        free(pC->AudioDecBufferOut.m_dataAddress);
-        pC->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
-    }
-
-    /* ----- Free reader stuff, if needed ----- */
-    // We cannot free the reader before decoders because the decoders may read
-    // from the reader (in another thread) before being stopped.
-
-    if( M4OSA_NULL != pC->
-        pReaderContext ) /**< may be M4OSA_NULL if M4MCS_open was not called */
-    {
-        err = pC->m_pReader->m_pFctClose(pC->pReaderContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1("M4MCS_cleanUp: m_pReader->m_pFctClose returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        err = pC->m_pReader->m_pFctDestroy(pC->pReaderContext);
-        pC->pReaderContext = M4OSA_NULL;
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4MCS_cleanUp: m_pReader->m_pFctDestroy returns 0x%x", err);
-            /**< don't return, we still have stuff to free */
-        }
-    }
-
-    if( pC->m_pDataAddress1 != M4OSA_NULL )
-    {
-        free(pC->m_pDataAddress1);
-        pC->m_pDataAddress1 = M4OSA_NULL;
-    }
-
-    if( pC->m_pDataAddress2 != M4OSA_NULL )
-    {
-        free(pC->m_pDataAddress2);
-        pC->m_pDataAddress2 = M4OSA_NULL;
-    }
-    /*Bug fix 11/12/2008 (to obtain more precise video end cut)*/
-    if( pC->m_pDataVideoAddress1 != M4OSA_NULL )
-    {
-        free(pC->m_pDataVideoAddress1);
-        pC->m_pDataVideoAddress1 = M4OSA_NULL;
-    }
-
-    if( pC->m_pDataVideoAddress2 != M4OSA_NULL )
-    {
-        free(pC->m_pDataVideoAddress2);
-        pC->m_pDataVideoAddress2 = M4OSA_NULL;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
-
- ******************************************************************************
- * M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
- *                             M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
- * @brief   Set the MCS input and output files. It is the same as M4MCS_open without
- *                                M4MCS_WITH_FAST_OPEN flag
-It is used in VideoArtist
- * @note    It opens the input file, but the output file is not created yet.
- * @param   pContext            (IN) MCS context
- * @param   pFileIn             (IN) Input file to transcode (The type of this parameter
- *                                    (URL, pipe...) depends on the OSAL implementation).
- * @param   mediaType           (IN) Container type (.3gp,.amr, ...) of input file.
- * @param   pFileOut            (IN) Output file to create  (The type of this parameter
- *                                (URL, pipe...) depends on the OSAL implementation).
- * @param   pTempFile           (IN) Temporary file for the constant memory writer to store
- *                                 metadata ("moov.bin").
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
- * @return  M4ERR_ALLOC:        There is no more available memory
- * @return  M4ERR_FILE_NOT_FOUND:   The input file has not been found
- * @return  M4MCS_ERR_INVALID_INPUT_FILE:   The input file is not a valid file, or is corrupted
- * @return  M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM:  The input file contains no
- *                                                         supported audio or video stream
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
-                                 M4VIDEOEDITING_FileType InputFileType,
-                                  M4OSA_Void* pFileOut, M4OSA_Void* pTempFile)
-{
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext*)(pContext);
-    M4OSA_ERR err;
-
-    M4READER_MediaFamily mediaFamily;
-    M4_StreamHandler* pStreamHandler;
-
-    M4OSA_TRACE2_3("M4MCS_open_normalMode called with pContext=0x%x, pFileIn=0x%x,\
-     pFileOut=0x%x", pContext, pFileIn, pFileOut);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-     "M4MCS_open_normalMode: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileIn) , M4ERR_PARAMETER,
-     "M4MCS_open_normalMode: pFileIn is M4OSA_NULL");
-
-    if ((InputFileType == M4VIDEOEDITING_kFileType_JPG)
-        ||(InputFileType == M4VIDEOEDITING_kFileType_PNG)
-        ||(InputFileType == M4VIDEOEDITING_kFileType_GIF)
-        ||(InputFileType == M4VIDEOEDITING_kFileType_BMP))
-    {
-        M4OSA_TRACE1_0("M4MCS_open_normalMode: Still picture is not\
-             supported with this function");
-        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
-    }
-
-    /**
-    * Check state automaton */
-    if (M4MCS_kState_CREATED != pC->State)
-    {
-        M4OSA_TRACE1_1("M4MCS_open_normalMode(): Wrong State (%d), returning M4ERR_STATE",
-             pC->State);
-        return M4ERR_STATE;
-    }
-
-    /* Copy function input parameters into our context */
-    pC->pInputFile     = pFileIn;
-    pC->InputFileType  = InputFileType;
-    pC->pOutputFile    = pFileOut;
-    pC->pTemporaryFile = pTempFile;
-
-    /***********************************/
-    /* Open input file with the reader */
-    /***********************************/
-
-    err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Reset reader related variables */
-    pC->VideoState          = M4MCS_kStreamState_NOSTREAM;
-    pC->AudioState          = M4MCS_kStreamState_NOSTREAM;
-    pC->pReaderVideoStream  = M4OSA_NULL;
-    pC->pReaderAudioStream  = M4OSA_NULL;
-
-    /*******************************************************/
-    /* Initializes the reader shell and open the data file */
-    /*******************************************************/
-    err = pC->m_pReader->m_pFctCreate(&pC->pReaderContext);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctCreate returns 0x%x", err);
-        return err;
-    }
-
-    /**
-    * Link the reader interface to the reader context */
-    pC->m_pReaderDataIt->m_readerContext = pC->pReaderContext;
-
-    /**
-    * Set the reader shell file access functions */
-    err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
-         M4READER_kOptionID_SetOsaFileReaderFctsPtr,
-        (M4OSA_DataOption)pC->pOsaFileReadPtr);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctSetOption returns 0x%x", err);
-        return err;
-    }
-
-    /**
-    * Open the input file */
-    err = pC->m_pReader->m_pFctOpen(pC->pReaderContext, pC->pInputFile);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_UInt32 uiDummy, uiCoreId;
-        M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctOpen returns 0x%x", err);
-
-        if (err == ((M4OSA_UInt32)M4ERR_UNSUPPORTED_MEDIA_TYPE)) {
-            M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning M4MCS_ERR_FILE_DRM_PROTECTED");
-            return M4MCS_ERR_FILE_DRM_PROTECTED;
-        } else {
-            /**
-            * If the error is from the core reader, we change it to a public VXS error */
-            M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
-            if (M4MP4_READER == uiCoreId)
-            {
-                M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning M4MCS_ERR_INVALID_INPUT_FILE");
-                return M4MCS_ERR_INVALID_INPUT_FILE;
-            }
-        }
-        return err;
-    }
-
-    /**
-    * Get the streams from the input file */
-    while (M4NO_ERROR == err)
-    {
-        err = pC->m_pReader->m_pFctGetNextStream(pC->pReaderContext, &mediaFamily,
-            &pStreamHandler);
-
-        /**
-        * In case we found a BIFS stream or something else...*/
-        if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
-            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
-        {
-            err = M4NO_ERROR;
-            continue;
-        }
-
-        if (M4NO_ERROR == err) /**< One stream found */
-        {
-            /**
-            * Found the first video stream */
-            if ((M4READER_kMediaFamilyVideo == mediaFamily) \
-                && (M4OSA_NULL == pC->pReaderVideoStream))
-            {
-                if ((M4DA_StreamTypeVideoH263==pStreamHandler->m_streamType) ||
-                    (M4DA_StreamTypeVideoMpeg4==pStreamHandler->m_streamType)
-#ifdef M4VSS_SUPPORT_VIDEO_AVC
-                    ||(M4DA_StreamTypeVideoMpeg4Avc==pStreamHandler->m_streamType))
-#else
-                    ||((M4DA_StreamTypeVideoMpeg4Avc==pStreamHandler->m_streamType)
-                    &&(pC->m_pVideoDecoderItTable[M4DECODER_kVideoTypeAVC] != M4OSA_NULL)))
-#endif
-                {
-                    M4OSA_TRACE3_0("M4MCS_open_normalMode():\
-                     Found a H263 or MPEG-4 video stream in input 3gpp clip");
-
-                    /**
-                    * Keep pointer to the video stream */
-                    pC->pReaderVideoStream = (M4_VideoStreamHandler*)pStreamHandler;
-                    pC->bUnsupportedVideoFound = M4OSA_FALSE;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-
-                    /**
-                    * Init our video stream state variable */
-                    pC->VideoState = M4MCS_kStreamState_STARTED;
-
-                    /**
-                    * Reset the stream reader */
-                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
-                         (M4_StreamHandler*)pC->pReaderVideoStream);
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
-                             m_pReader->m_pFctReset(video) returns 0x%x", err);
-                        return err;
-                    }
-
-                    /**
-                    * Initializes an access Unit */
-                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext, pStreamHandler,
-                         &pC->ReaderVideoAU);
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
-                             m_pReader->m_pFctFillAuStruct(video) returns 0x%x", err);
-                        return err;
-                    }
-                }
-                else /**< Not H263 or MPEG-4 (H264, etc.) */
-                {
-                    M4OSA_TRACE1_1("M4MCS_open_normalMode():\
-                         Found an unsupported video stream (0x%x) in input 3gpp clip",
-                             pStreamHandler->m_streamType);
-
-                    pC->bUnsupportedVideoFound = M4OSA_TRUE;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-            }
-            /**
-            * Found the first audio stream */
-            else if ((M4READER_kMediaFamilyAudio == mediaFamily)
-                && (M4OSA_NULL == pC->pReaderAudioStream))
-            {
-                if ((M4DA_StreamTypeAudioAmrNarrowBand==pStreamHandler->m_streamType) ||
-                    (M4DA_StreamTypeAudioAac==pStreamHandler->m_streamType) ||
-                    (M4DA_StreamTypeAudioMp3==pStreamHandler->m_streamType) ||
-                    (M4DA_StreamTypeAudioEvrc==pStreamHandler->m_streamType) )
-                {
-                    M4OSA_TRACE3_0("M4MCS_open_normalMode(): Found an AMR-NB, AAC \
-                        or MP3 audio stream in input clip");
-
-                    /**
-                    * Keep pointer to the audio stream */
-                    pC->pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-                    pC->bUnsupportedAudioFound = M4OSA_FALSE;
-
-                    /**
-                    * Init our audio stream state variable */
-                    pC->AudioState = M4MCS_kStreamState_STARTED;
-
-                    /**
-                    * Reset the stream reader */
-                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
-                         (M4_StreamHandler*)pC->pReaderAudioStream);
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
-                             m_pReader->m_pFctReset(audio) returns 0x%x", err);
-                        return err;
-                    }
-
-                    /**
-                    * Initializes an access Unit */
-                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext, pStreamHandler,
-                         &pC->ReaderAudioAU);
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4MCS_open_normalMode(): \
-                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x", err);
-                        return err;
-                    }
-
-                    /**
-                    * Output max AU size is equal to input max AU size (this value
-                    * will be changed if there is audio transcoding) */
-                    pC->uiAudioMaxAuSize = pStreamHandler->m_maxAUSize;
-
-                }
-                else
-                {
-                    /**< Not AMR-NB, AAC, MP3 nor EVRC (AMR-WB, WAV...) */
-                    M4OSA_TRACE1_1("M4MCS_open_normalMode(): Found an unsupported audio stream\
-                         (0x%x) in input 3gpp clip", pStreamHandler->m_streamType);
-
-                    pC->bUnsupportedAudioFound = M4OSA_TRUE;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-            }
-        }
-    } /**< end of while (M4NO_ERROR == err) */
-
-    /**
-    * Check we found at least one supported stream */
-    if((M4OSA_NULL == pC->pReaderVideoStream) && (M4OSA_NULL == pC->pReaderAudioStream))
-    {
-        M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning \
-            M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM");
-        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
-    }
-
-#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
-    if(pC->VideoState == M4MCS_kStreamState_STARTED)
-    {
-        err = M4MCS_setCurrentVideoDecoder(pContext,
-            pC->pReaderVideoStream->m_basicProperties.m_streamType);
-        M4ERR_CHECK_RETURN(err);
-    }
-#endif
-
-    if(pC->AudioState == M4MCS_kStreamState_STARTED)
-    {
-        //EVRC
-        if(M4DA_StreamTypeAudioEvrc != pStreamHandler->m_streamType)
-         /* decoder not supported yet, but allow to do null encoding */
-        {
-            err = M4MCS_setCurrentAudioDecoder(pContext,
-                 pC->pReaderAudioStream->m_basicProperties.m_streamType);
-            M4ERR_CHECK_RETURN(err);
-        }
-    }
-
-    /**
-    * Get the audio and video stream properties */
-    err = M4MCS_intGetInputClipProperties(pC);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
-             M4MCS_intGetInputClipProperties returns 0x%x", err);
-        return err;
-    }
-
-    /**
-    * Set the begin cut decoding increment according to the input frame rate */
-    if (0. != pC->InputFileProperties.fAverageFrameRate) /**< sanity check */
-    {
-        pC->iVideoBeginDecIncr = (M4OSA_Int32)(3000. \
-            / pC->InputFileProperties.fAverageFrameRate); /**< about 3 frames */
-    }
-    else
-    {
-        pC->iVideoBeginDecIncr = 200; /**< default value: 200 milliseconds (3 frames @ 15fps)*/
-    }
-
-    /**
-    * Update state automaton */
-    pC->State = M4MCS_kState_OPENED;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4MCS_open_normalMode(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR M4MCS_intCheckAndGetCodecProperties(
-                                 M4MCS_InternalContext *pC) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4AD_Buffer outputBuffer;
-    uint32_t optionValue =0;
-
-    M4OSA_TRACE3_0("M4MCS_intCheckAndGetCodecProperties :start");
-
-    // Decode first audio frame from clip to get properties from codec
-
-    if (M4DA_StreamTypeAudioAac ==
-            pC->pReaderAudioStream->m_basicProperties.m_streamType) {
-
-        err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
-                    &pC->pAudioDecCtxt,
-                    pC->pReaderAudioStream, &(pC->AacProperties));
-    } else {
-        err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
-                    &pC->pAudioDecCtxt,
-                    pC->pReaderAudioStream,
-                    pC->m_pCurrentAudioDecoderUserData);
-    }
-    if (M4NO_ERROR != err) {
-
-        M4OSA_TRACE1_1(
-            "M4MCS_intCheckAndGetCodecProperties: m_pFctCreateAudioDec \
-             returns 0x%x", err);
-        return err;
-    }
-
-    pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
-           M4AD_kOptionID_3gpReaderInterface, (M4OSA_DataOption) pC->m_pReaderDataIt);
-
-    pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
-           M4AD_kOptionID_AudioAU, (M4OSA_DataOption) &pC->ReaderAudioAU);
-
-    if( pC->m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL ) {
-
-        err = pC->m_pAudioDecoder->m_pFctStartAudioDec(pC->pAudioDecCtxt);
-        if( M4NO_ERROR != err ) {
-
-            M4OSA_TRACE1_1(
-                "M4MCS_intCheckAndGetCodecProperties: m_pFctStartAudioDec \
-                 returns 0x%x", err);
-            return err;
-        }
-    }
-
-    /**
-    * Allocate output buffer for the audio decoder */
-    outputBuffer.m_bufferSize =
-        pC->pReaderAudioStream->m_byteFrameLength
-        * pC->pReaderAudioStream->m_byteSampleSize
-        * pC->pReaderAudioStream->m_nbChannels;
-
-    if( outputBuffer.m_bufferSize > 0 ) {
-
-        outputBuffer.m_dataAddress =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(outputBuffer.m_bufferSize \
-            *sizeof(short), M4MCS, (M4OSA_Char *)"outputBuffer.m_bufferSize");
-
-        if( M4OSA_NULL == outputBuffer.m_dataAddress ) {
-
-            M4OSA_TRACE1_0(
-                "M4MCS_intCheckAndGetCodecProperties():\
-                 unable to allocate outputBuffer.m_dataAddress, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-    }
-
-    err = pC->m_pAudioDecoder->m_pFctStepAudioDec(pC->pAudioDecCtxt,
-        M4OSA_NULL, &outputBuffer, M4OSA_FALSE);
-
-    if ( err == M4WAR_INFO_FORMAT_CHANGE ) {
-
-        // Get the properties from codec node
-        pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(pC->pAudioDecCtxt,
-           M4AD_kOptionID_AudioNbChannels, (M4OSA_DataOption) &optionValue);
-
-        // Reset Reader structure value also
-        pC->pReaderAudioStream->m_nbChannels = optionValue;
-
-        pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(pC->pAudioDecCtxt,
-         M4AD_kOptionID_AudioSampFrequency, (M4OSA_DataOption) &optionValue);
-
-        // Reset Reader structure value also
-        pC->pReaderAudioStream->m_samplingFrequency = optionValue;
-
-        if (M4DA_StreamTypeAudioAac ==
-            pC->pReaderAudioStream->m_basicProperties.m_streamType) {
-
-            pC->AacProperties.aNumChan =
-                pC->pReaderAudioStream->m_nbChannels;
-            pC->AacProperties.aSampFreq =
-                pC->pReaderAudioStream->m_samplingFrequency;
-
-        }
-
-    } else if( err != M4NO_ERROR) {
-        M4OSA_TRACE1_1("M4MCS_intCheckAndGetCodecProperties:\
-            m_pFctStepAudioDec returns err = 0x%x", err);
-    }
-
-    free(outputBuffer.m_dataAddress);
-
-    // Reset the stream reader
-    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
-                 (M4_StreamHandler *)pC->pReaderAudioStream);
-
-    if (M4NO_ERROR != err) {
-        M4OSA_TRACE1_1("M4MCS_intCheckAndGetCodecProperties\
-            Error in reseting reader: 0x%x", err);
-    }
-
-    return err;
-
-}
-
-M4OSA_ERR M4MCS_intLimitBitratePerCodecProfileLevel(
-                                 M4ENCODER_AdvancedParams* EncParams) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    switch (EncParams->Format) {
-        case M4ENCODER_kH263:
-            EncParams->Bitrate = M4MCS_intLimitBitrateForH263Enc(
-                                     EncParams->videoProfile,
-                                     EncParams->videoLevel, EncParams->Bitrate);
-            break;
-
-        case M4ENCODER_kMPEG4:
-            EncParams->Bitrate = M4MCS_intLimitBitrateForMpeg4Enc(
-                                     EncParams->videoProfile,
-                                     EncParams->videoLevel, EncParams->Bitrate);
-            break;
-
-        case M4ENCODER_kH264:
-            EncParams->Bitrate = M4MCS_intLimitBitrateForH264Enc(
-                                     EncParams->videoProfile,
-                                     EncParams->videoLevel, EncParams->Bitrate);
-            break;
-
-        default:
-            M4OSA_TRACE1_1("M4MCS_intLimitBitratePerCodecProfileLevel: \
-                Wrong enc format %d", EncParams->Format);
-            err = M4ERR_PARAMETER;
-            break;
-    }
-
-    return err;
-
-}
-
-M4OSA_Int32 M4MCS_intLimitBitrateForH264Enc(M4OSA_Int32 profile,
-                                    M4OSA_Int32 level, M4OSA_Int32 bitrate) {
-
-    M4OSA_Int32 vidBitrate = 0;
-
-    switch (profile) {
-        case OMX_VIDEO_AVCProfileBaseline:
-        case OMX_VIDEO_AVCProfileMain:
-
-            switch (level) {
-
-                case OMX_VIDEO_AVCLevel1:
-                    vidBitrate = (bitrate > 64000) ? 64000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel1b:
-                    vidBitrate = (bitrate > 128000) ? 128000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel11:
-                    vidBitrate = (bitrate > 192000) ? 192000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel12:
-                    vidBitrate = (bitrate > 384000) ? 384000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel13:
-                    vidBitrate = (bitrate > 768000) ? 768000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel2:
-                    vidBitrate = (bitrate > 2000000) ? 2000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel21:
-                    vidBitrate = (bitrate > 4000000) ? 4000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel22:
-                    vidBitrate = (bitrate > 4000000) ? 4000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel3:
-                    vidBitrate = (bitrate > 10000000) ? 10000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel31:
-                    vidBitrate = (bitrate > 14000000) ? 14000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel32:
-                    vidBitrate = (bitrate > 20000000) ? 20000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel4:
-                    vidBitrate = (bitrate > 20000000) ? 20000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel41:
-                    vidBitrate = (bitrate > 50000000) ? 50000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel42:
-                    vidBitrate = (bitrate > 50000000) ? 50000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel5:
-                    vidBitrate = (bitrate > 135000000) ? 135000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel51:
-                    vidBitrate = (bitrate > 240000000) ? 240000000 : bitrate;
-                    break;
-
-                default:
-                    vidBitrate = bitrate;
-                    break;
-            }
-            break;
-
-        case OMX_VIDEO_AVCProfileHigh:
-            switch (level) {
-                case OMX_VIDEO_AVCLevel1:
-                    vidBitrate = (bitrate > 80000) ? 80000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel1b:
-                    vidBitrate = (bitrate > 160000) ? 160000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel11:
-                    vidBitrate = (bitrate > 240000) ? 240000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel12:
-                    vidBitrate = (bitrate > 480000) ? 480000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel13:
-                    vidBitrate = (bitrate > 960000) ? 960000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel2:
-                    vidBitrate = (bitrate > 2500000) ? 2500000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel21:
-                    vidBitrate = (bitrate > 5000000) ? 5000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel22:
-                    vidBitrate = (bitrate > 5000000) ? 5000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel3:
-                    vidBitrate = (bitrate > 12500000) ? 12500000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel31:
-                    vidBitrate = (bitrate > 17500000) ? 17500000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel32:
-                    vidBitrate = (bitrate > 25000000) ? 25000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel4:
-                    vidBitrate = (bitrate > 25000000) ? 25000000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel41:
-                    vidBitrate = (bitrate > 62500000) ? 62500000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel42:
-                    vidBitrate = (bitrate > 62500000) ? 62500000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel5:
-                    vidBitrate = (bitrate > 168750000) ? 168750000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_AVCLevel51:
-                    vidBitrate = (bitrate > 300000000) ? 300000000 : bitrate;
-                    break;
-
-                default:
-                    vidBitrate = bitrate;
-                    break;
-            }
-            break;
-
-        default:
-            // We do not handle any other AVC profile for now.
-            // Return input bitrate
-            vidBitrate = bitrate;
-            break;
-    }
-
-    return vidBitrate;
-}
-
-M4OSA_Int32 M4MCS_intLimitBitrateForMpeg4Enc(M4OSA_Int32 profile,
-                                    M4OSA_Int32 level, M4OSA_Int32 bitrate) {
-
-    M4OSA_Int32 vidBitrate = 0;
-
-    switch (profile) {
-        case OMX_VIDEO_MPEG4ProfileSimple:
-            switch (level) {
-
-                case OMX_VIDEO_MPEG4Level0:
-                    vidBitrate = (bitrate > 64000) ? 64000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_MPEG4Level0b:
-                    vidBitrate = (bitrate > 128000) ? 128000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_MPEG4Level1:
-                    vidBitrate = (bitrate > 64000) ? 64000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_MPEG4Level2:
-                    vidBitrate = (bitrate > 128000) ? 128000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_MPEG4Level3:
-                    vidBitrate = (bitrate > 384000) ? 384000 : bitrate;
-                    break;
-
-                default:
-                    vidBitrate = bitrate;
-                    break;
-            }
-            break;
-
-        default:
-            // We do not handle any other MPEG4 profile for now.
-            // Return input bitrate
-            vidBitrate = bitrate;
-            break;
-    }
-
-    return vidBitrate;
-}
-
-M4OSA_Int32 M4MCS_intLimitBitrateForH263Enc(M4OSA_Int32 profile,
-                                    M4OSA_Int32 level, M4OSA_Int32 bitrate) {
-
-    M4OSA_Int32 vidBitrate = 0;
-
-    switch (profile) {
-        case OMX_VIDEO_H263ProfileBaseline:
-            switch (level) {
-
-                case OMX_VIDEO_H263Level10:
-                    vidBitrate = (bitrate > 64000) ? 64000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_H263Level20:
-                    vidBitrate = (bitrate > 128000) ? 128000 : bitrate;
-                    break;
-
-                case OMX_VIDEO_H263Level30:
-                    vidBitrate = (bitrate > 384000) ? 384000 : bitrate;
-                    break;
-
-                default:
-                    vidBitrate = bitrate;
-                    break;
-            }
-            break;
-
-        default:
-            // We do not handle any other H263 profile for now.
-            // Return input bitrate
-            vidBitrate = bitrate;
-            break;
-    }
-
-    return vidBitrate;
-}
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c b/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c
deleted file mode 100755
index 488de68..0000000
--- a/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *************************************************************************
- * @file   M4MCS_API.c
- * @brief  MCS implementation (Video Compressor Service)
- * @note   This file implements the API and the processing of the MCS
- *************************************************************************
- **/
-
-/****************/
-/*** Includes ***/
-/****************/
-
-/**
- * OSAL headers */
-#include "M4OSA_Memory.h"   /**< OSAL memory management */
-#include "M4OSA_Debug.h"    /**< OSAL debug management */
-
-/* Our headers */
-#include "M4MCS_API.h"
-#include "M4MCS_ErrorCodes.h"
-#include "M4MCS_InternalTypes.h"
-#include "M4MCS_InternalConfig.h"
-#include "M4MCS_InternalFunctions.h"
-
-/* Common headers (for aac) */
-#include "M4_Common.h"
-
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-#include "M4VD_EXTERNAL_Interface.h"
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pContext)
- * @brief    Check if an effect has to be applied currently
- * @note    It is called by the stepEncoding function
- * @param    pContext    (IN) MCS internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC)
-{
-    M4OSA_Int8 *pActiveEffectNumber = &(pC->pActiveEffectNumber);
-
-    *pActiveEffectNumber = -1;
-
-    if(pC->ReaderAudioAU.m_CTS > pC->uiBeginCutTime
-    && pC->ReaderAudioAU.m_CTS < pC->uiEndCutTime)
-    {
-        M4OSA_UInt32 outputRelatedTime = 0;
-        M4OSA_UInt8 uiEffectIndex = 0;
-        outputRelatedTime =
-        (M4OSA_UInt32)(pC->ReaderAudioAU.m_CTS  - pC->uiBeginCutTime + 0.5);
-
-        for(uiEffectIndex=0; uiEffectIndex<pC->nbEffects; uiEffectIndex++)
-        {
-            if ((outputRelatedTime >=
-                (M4OSA_UInt32)(pC->pEffects[uiEffectIndex].uiStartTime)) &&
-                (outputRelatedTime <
-                (M4OSA_UInt32)(pC->pEffects[uiEffectIndex].uiStartTime +\
-                pC->pEffects[uiEffectIndex].uiDuration)))
-            {
-                *pActiveEffectNumber = uiEffectIndex;
-                uiEffectIndex = pC->nbEffects;
-            }
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
- * @brief    Apply audio effect FadeIn to pPCMdata
- * @param   pC            (IN/OUT) Internal edit context
- * @param    pPCMdata    (IN/OUT) Input and Output PCM audio data
- * @param    uiPCMsize    (IN)     Size of pPCMdata
- * @param    pProgress    (IN)     Effect progress
- * @return    M4NO_ERROR:             No error
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn(  M4OSA_Void *pFunctionContext,
-                                            M4OSA_Int16 *pPCMdata,
-                                            M4OSA_UInt32 uiPCMsize,
-                                            M4MCS_ExternalProgress *pProgress)
-{
-    /* we will cast each Int16 sample into this Int32 variable */
-    M4OSA_Int32 i32sample;
-
-    /**
-     * Sanity check */
-    if(pProgress->uiProgress > 1000)
-    {
-        pProgress->uiProgress = 1000;
-    }
-
-    /**
-     * From buffer size (bytes) to number of sample (int16): divide by two */
-    uiPCMsize >>= 1;
-
-    /**
-     * Loop on samples */
-    while (uiPCMsize-->0) /**< decrementing to optimize */
-    {
-        i32sample = *pPCMdata;
-        i32sample *= pProgress->uiProgress;
-        i32sample /= 1000;
-        *pPCMdata++ = (M4OSA_Int16)i32sample;
-    }
-
-    /**
-     *    Return */
-    M4OSA_TRACE3_0("M4MCS_editAudioEffectFct_FadeIn: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut()
- * @brief    Apply audio effect FadeIn to pPCMdata
- * @param    pC            (IN/OUT) Internal edit context
- * @param    pPCMdata    (IN/OUT) Input and Output PCM audio data
- * @param    uiPCMsize    (IN)     Size of pPCMdata
- * @param    pProgress    (IN)     Effect progress
- * @return   M4NO_ERROR:             No error
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut( M4OSA_Void *pFunctionContext,
-                                            M4OSA_Int16 *pPCMdata,
-                                            M4OSA_UInt32 uiPCMsize,
-                                            M4MCS_ExternalProgress *pProgress)
-{
-    /* we will cast each Int16 sample into this Int32 variable */
-    M4OSA_Int32 i32sample;
-
-    /**
-     * Sanity check */
-    if(pProgress->uiProgress > 1000)
-    {
-        pProgress->uiProgress = 1000;
-    }
-    pProgress->uiProgress = 1000 - pProgress->uiProgress;
-
-    /**
-     * From buffer size (bytes) to number of sample (int16): divide by two */
-    uiPCMsize >>= 1;
-
-    /**
-     * Loop on samples */
-    while (uiPCMsize-->0) /**< decrementing to optimize */
-    {
-        i32sample = *pPCMdata;
-        i32sample *= pProgress->uiProgress;
-        i32sample /= 1000;
-        *pPCMdata++ = (M4OSA_Int16)i32sample;
-    }
-
-    /**
-     *    Return */
-    M4OSA_TRACE3_0("M4MCS_editAudioEffectFct_FadeOut: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c b/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c
deleted file mode 100755
index 554492b..0000000
--- a/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4MCS_Codecs.c
- * @brief  MCS implementation
- * @note   This file contains all functions related to audio/video
- *         codec manipulations.
- ************************************************************************
- */
-
-/**
- ********************************************************************
- * Includes
- ********************************************************************
- */
-#include "NXPSW_CompilerSwitches.h"
-#include "M4OSA_Debug.h"            /* Include for OSAL debug services */
-#include "M4MCS_InternalTypes.h"    /* Internal types of the MCS */
-
-
-#ifdef M4MCS_SUPPORT_VIDEC_3GP
-#include "M4_MPEG4VI_VideoHandler.h"  /*needed for renderer error codes*/
-#endif
-
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_clearInterfaceTables()
- * @brief    Clear encoders, decoders, reader and writers interfaces tables
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    The context is null
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_clearInterfaceTables(M4MCS_Context pContext)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4OSA_UInt8 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    /* Initialisation that will allow to check if registering twice */
-    pC->pWriterGlobalFcts = M4OSA_NULL;
-    pC->pWriterDataFcts = M4OSA_NULL;
-    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
-    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
-
-    pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
-    pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
-
-    for (i = 0; i < M4WRITER_kType_NB; i++ )
-    {
-        pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
-        pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
-    }
-
-    for (i = 0; i < M4ENCODER_kVideo_NB; i++ )
-    {
-        pC->pVideoEncoderInterface[i] = M4OSA_NULL;
-        pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
-        pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
-    }
-
-    for (i = 0; i < M4ENCODER_kAudio_NB; i++ )
-    {
-        pC->pAudioEncoderInterface[i] = M4OSA_NULL;
-        pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
-        pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
-    }
-
-    /* Initialisation that will allow to check if registering twice */
-    pC->m_pReader = M4OSA_NULL;
-    pC->m_pReaderDataIt   = M4OSA_NULL;
-    pC->m_uiNbRegisteredReaders  = 0;
-
-    for (i = 0; i < M4READER_kMediaType_NB; i++ )
-    {
-        pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
-        pC->m_pReaderDataItTable[i]   = M4OSA_NULL;
-    }
-
-    pC->m_pVideoDecoder = M4OSA_NULL;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-    pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-    pC->m_uiNbRegisteredVideoDec = 0;
-    for (i = 0; i < M4DECODER_kVideoType_NB; i++ )
-    {
-        pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-        pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-    }
-
-    pC->m_pAudioDecoder = M4OSA_NULL;
-    for (i = 0; i < M4AD_kType_NB; i++ )
-    {
-        pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
-        pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
-        pC->m_pAudioDecoderUserDataTable[i] = M4OSA_NULL;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4MCS_registerWriter()
- * @brief    This function will register a specific file format writer.
- * @note    According to the Mediatype, this function will store in the internal context
- *          the writer context.
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return      M4ERR_PARAMETER     pContext,pWtrGlobalInterface or
- *                                  pWtrDataInterface is M4OSA_NULL
- *                                  (debug only), or invalid MediaType
- ******************************************************************************
- */
-M4OSA_ERR   M4MCS_registerWriter(M4MCS_Context pContext, M4WRITER_OutputFileType MediaType,
-                                 M4WRITER_GlobalInterface* pWtrGlobalInterface,
-                                 M4WRITER_DataInterface* pWtrDataInterface)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-
-    /**
-     *    Check input parameters */
-    M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
-         "MCS: context is M4OSA_NULL in M4MCS_registerWriter");
-    M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
-         "pWtrGlobalInterface is M4OSA_NULL in M4MCS_registerWriter");
-    M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL),M4ERR_PARAMETER,
-         "pWtrDataInterface is M4OSA_NULL in M4MCS_registerWriter");
-
-    M4OSA_TRACE3_3("MCS: M4MCS_registerWriter called with pContext=0x%x,\
-     pWtrGlobalInterface=0x%x, pWtrDataInterface=0x%x", pC,pWtrGlobalInterface,
-     pWtrDataInterface);
-
-    if((MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB))
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
-        return M4ERR_PARAMETER;
-    }
-
-    if (pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL)
-    {
-      /* a writer corresponding to this media type has already been registered !*/
-      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "This media type has already been registered");
-      return M4ERR_PARAMETER;
-    }
-
-    /*
-     * Save writer interface in context */
-    pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
-    pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4MCS_registerEncoder()
- * @brief    This function will register a specific video encoder.
- * @note    According to the Mediatype, this function will store in the internal context
- *           the encoder context.
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
- *                             or invalid MediaType
- ******************************************************************************
- */
-M4OSA_ERR   M4MCS_registerVideoEncoder (
-                    M4MCS_Context pContext,
-                    M4ENCODER_Format MediaType,
-                    M4ENCODER_GlobalInterface *pEncGlobalInterface)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-
-    /**
-     *    Check input parameters */
-    M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
-         "MCS: context is M4OSA_NULL in M4MCS_registerVideoEncoder");
-    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
-         "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerVideoEncoder");
-
-    M4OSA_TRACE3_2("MCS: M4MCS_registerVideoEncoder called with pContext=0x%x,\
-         pEncGlobalInterface=0x%x", pC, pEncGlobalInterface);
-
-    if (MediaType >= M4ENCODER_kVideo_NB)
-    {
-      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid video encoder type");
-      return M4ERR_PARAMETER;
-    }
-
-    if (pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL)
-    {
-        /* can be legitimate, in cases where we have one version that can use external encoders
-        but which still has the built-in one to be able to work without an external encoder; in
-        this case the new encoder simply replaces the old one (i.e. we unregister it first). */
-        free(pC->pVideoEncoderInterface[MediaType]);
-        pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
-    }
-
-    /*
-     * Save encoder interface in context */
-    pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
-    /* The actual userData and external API will be set by the registration function in the case
-    of an external encoder (add it as a parameter to this function in the long run?) */
-    pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
-    pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4MCS_registerAudioEncoder()
- * @brief    This function will register a specific audio encoder.
- * @note    According to the Mediatype, this function will store in the internal context
- *           the encoder context.
- * @param    pContext:                (IN) Execution context.
- * @param    mediaType:                (IN) The media type.
- * @param    pEncGlobalInterface:    (OUT) the encoder interface functions.
- * @return    M4NO_ERROR: there is no error
- * @return   M4ERR_PARAMETER:   pContext or pEncGlobalInterface is
- *                              M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR   M4MCS_registerAudioEncoder(
-                    M4MCS_Context pContext,
-                    M4ENCODER_AudioFormat MediaType,
-                    M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-
-    /**
-     *    Check input parameters */
-    M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
-         "MCS: context is M4OSA_NULL in M4MCS_registerAudioEncoder");
-    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
-         "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerAudioEncoder");
-
-    M4OSA_TRACE3_2("MCS: M4MCS_registerAudioEncoder called with pContext=0x%x,\
-         pEncGlobalInterface=0x%x", pC, pEncGlobalInterface);
-
-    if (MediaType >= M4ENCODER_kAudio_NB)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid audio encoder type");
-        return M4ERR_PARAMETER;
-    }
-
-    if(M4OSA_NULL != pC->pAudioEncoderInterface[MediaType])
-    {
-        free(pC->pAudioEncoderInterface[MediaType]);
-        pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
-
-        if(M4OSA_NULL != pC->pAudioEncoderUserDataTable[MediaType])
-        {
-            free(pC->pAudioEncoderUserDataTable[MediaType]);
-            pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
-        }
-    }
-
-    /*
-     * Save encoder interface in context */
-    pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
-    pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_registerReader()
- * @brief    Register reader.
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_registerReader(
-                        M4MCS_Context pContext,
-                        M4READER_MediaType mediaType,
-                        M4READER_GlobalInterface *pRdrGlobalInterface,
-                        M4READER_DataInterface *pRdrDataInterface)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface),
-         M4ERR_PARAMETER, "M4MCS_registerReader: invalid pointer on global interface");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface),
-         M4ERR_PARAMETER, "M4MCS_registerReader: invalid pointer on data interface");
-
-    if (mediaType == M4READER_kMediaTypeUnknown || mediaType >= M4READER_kMediaType_NB)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
-        return M4ERR_PARAMETER;
-    }
-
-    if (pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL)
-    {
-        /* a reader corresponding to this media type has already been registered !*/
-      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "This media type has already been registered");
-      return M4ERR_PARAMETER;
-    }
-
-    pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
-    pC->m_pReaderDataItTable[mediaType]   = pRdrDataInterface;
-
-    pC->m_uiNbRegisteredReaders++;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_registerVideoDecoder()
- * @brief    Register video decoder
- * @param    pContext                (IN/OUT) MCS context.
- * @param    decoderType            (IN) Decoder type
- * @param    pDecoderInterface    (IN) Decoder interface.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only), or the decoder
- *                              type is invalid
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_registerVideoDecoder(
-                            M4MCS_Context pContext,
-                            M4DECODER_VideoType decoderType,
-                            M4DECODER_VideoInterface *pDecoderInterface)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
-         "M4MCS_registerVideoDecoder: invalid pointer on decoder interface");
-
-    if (decoderType >= M4DECODER_kVideoType_NB)
-    {
-      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid video decoder type");
-      return M4ERR_PARAMETER;
-    }
-
-    if (pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL)
-    {
-#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
-        /* a decoder corresponding to this media type has already been registered !*/
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Decoder has already been registered");
-        return M4ERR_PARAMETER;
-#else /* external decoders are possible */
-        /* can be legitimate, in cases where we have one version that can use external decoders
-        but which still has the built-in one to be able to work without an external decoder; in
-        this case the new decoder simply replaces the old one (i.e. we unregister it first). */
-        free(pC->m_pVideoDecoderItTable[decoderType]);
-        pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
-        /* oh, and don't forget the user data, too. */
-        if (pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL)
-        {
-            free(pC->m_pVideoDecoderUserDataTable[decoderType]);
-            pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
-        }
-#endif /* are external decoders possible? */
-    }
-
-    pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-    pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
-    /* The actual userData will be set by the registration function in the case
-    of an external decoder (add it as a parameter to this function in the long run?) */
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-    pC->m_uiNbRegisteredVideoDec++;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_registerAudioDecoder()
- * @brief    Register audio decoder
- * @note        This function is used internaly by the MCS to
- *              register audio decoders,
- * @param    context                (IN/OUT) MCS context.
- * @param    decoderType            (IN) Audio decoder type
- * @param    pDecoderInterface    (IN) Audio decoder interface.
- * @return    M4NO_ERROR:            No error
- * @return   M4ERR_PARAMETER:    A parameter is null, or the decoder type is invalid(in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_registerAudioDecoder(
-                                    M4MCS_Context pContext,
-                                    M4AD_Type decoderType,
-                                    M4AD_Interface *pDecoderInterface)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
-         "M4MCS_registerAudioDecoder: invalid pointer on decoder interface");
-
-    if (decoderType >= M4AD_kType_NB)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid audio decoder type");
-        return M4ERR_PARAMETER;
-    }
-
-    if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
-    {
-        free(pC->m_pAudioDecoderItTable[decoderType]);
-        pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
-
-        if(M4OSA_NULL != pC->m_pAudioDecoderUserDataTable[decoderType])
-        {
-            free(pC->m_pAudioDecoderUserDataTable[decoderType]);
-            pC->m_pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
-        }
-    }
-    pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
-    pC->m_pAudioDecoderFlagTable[decoderType] = M4OSA_FALSE; /* internal decoder */
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_unRegisterAllWriters()
- * @brief    Unregister writer
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllWriters(M4MCS_Context pContext)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    for (i = 0; i < M4WRITER_kType_NB; i++)
-    {
-        if (pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL)
-        {
-            free(pC->WriterInterface[i].pGlobalFcts );
-            pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
-        }
-        if (pC->WriterInterface[i].pDataFcts != M4OSA_NULL)
-        {
-            free(pC->WriterInterface[i].pDataFcts );
-            pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
-        }
-    }
-
-    pC->pWriterGlobalFcts = M4OSA_NULL;
-    pC->pWriterDataFcts = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_unRegisterAllEncoders()
- * @brief    Unregister the encoders
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllEncoders(M4MCS_Context pContext)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    for (i = 0; i < M4ENCODER_kVideo_NB; i++)
-    {
-        if (pC->pVideoEncoderInterface[i] != M4OSA_NULL)
-        {
-            free(pC->pVideoEncoderInterface[i] );
-            pC->pVideoEncoderInterface[i] = M4OSA_NULL;
-        }
-    }
-
-    for (i = 0; i < M4ENCODER_kAudio_NB; i++)
-    {
-        if (pC->pAudioEncoderInterface[i] != M4OSA_NULL)
-        {
-            /*Don't free external audio encoders interfaces*/
-            if (M4OSA_FALSE == pC->pAudioEncoderFlag[i])
-            {
-                free(pC->pAudioEncoderInterface[i] );
-            }
-            pC->pAudioEncoderInterface[i] = M4OSA_NULL;
-        }
-    }
-
-    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
-    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_unRegisterAllReaders()
- * @brief    Unregister reader
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllReaders(M4MCS_Context pContext)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    for (i = 0; i < M4READER_kMediaType_NB; i++)
-    {
-        if (pC->m_pReaderGlobalItTable[i] != M4OSA_NULL)
-        {
-            free(pC->m_pReaderGlobalItTable[i] );
-            pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
-        }
-        if (pC->m_pReaderDataItTable[i] != M4OSA_NULL)
-        {
-            free(pC->m_pReaderDataItTable[i] );
-            pC->m_pReaderDataItTable[i] = M4OSA_NULL;
-        }
-    }
-
-    pC->m_uiNbRegisteredReaders = 0;
-    pC->m_pReader = M4OSA_NULL;
-    pC->m_pReaderDataIt = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_unRegisterAllDecoders()
- * @brief    Unregister the decoders
- * @param    pContext            (IN/OUT) MCS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_unRegisterAllDecoders(M4MCS_Context pContext)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    for (i = 0; i < M4DECODER_kVideoType_NB; i++)
-    {
-        if (pC->m_pVideoDecoderItTable[i] != M4OSA_NULL)
-        {
-            free(pC->m_pVideoDecoderItTable[i] );
-            pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
-        }
-    }
-
-    for (i = 0; i < M4AD_kType_NB; i++)
-    {
-        if (pC->m_pAudioDecoderItTable[i] != M4OSA_NULL)
-        {
-            /*Don't free external audio decoders interfaces*/
-            if (M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i])
-            {
-                free(pC->m_pAudioDecoderItTable[i] );
-            }
-            pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
-        }
-    }
-
-    pC->m_uiNbRegisteredVideoDec = 0;
-    pC->m_pVideoDecoder = M4OSA_NULL;
-
-    pC->m_pAudioDecoder = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_setCurrentWriter()
- * @brief    Set current writer
- * @param    pContext            (IN/OUT) MCS context.
- * @param    mediaType            (IN) Media type.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentWriter( M4MCS_Context pContext,
-                                    M4VIDEOEDITING_FileType mediaType)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4WRITER_OutputFileType writerType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    switch (mediaType)
-    {
-        case M4VIDEOEDITING_kFileType_3GPP:
-        case M4VIDEOEDITING_kFileType_MP4:
-        case M4VIDEOEDITING_kFileType_M4V:
-            writerType = M4WRITER_k3GPP;
-            break;
-        case M4VIDEOEDITING_kFileType_AMR:
-            writerType = M4WRITER_kAMR;
-            break;
-        case M4VIDEOEDITING_kFileType_MP3:
-            writerType = M4WRITER_kMP3;
-            break;
-        case M4VIDEOEDITING_kFileType_PCM:
-            pC->b_isRawWriter = M4OSA_TRUE;
-            writerType = M4WRITER_kPCM;
-            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-                 "Writer type not supported");
-            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
-    pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
-
-    if (pC->pWriterGlobalFcts == M4OSA_NULL || pC->pWriterDataFcts == M4OSA_NULL)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-             "Writer type not supported");
-        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_setCurrentVideoEncoder()
- * @brief    Set a video encoder
- * @param    pContext            (IN/OUT) MCS context.
- * @param    MediaType           (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentVideoEncoder(
-                                M4MCS_Context pContext,
-                                M4VIDEOEDITING_VideoFormat mediaType)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4ENCODER_Format encoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    switch (mediaType)
-    {
-        case M4VIDEOEDITING_kH263:
-            encoderType = M4ENCODER_kH263;
-            break;
-        case M4VIDEOEDITING_kMPEG4:
-            encoderType = M4ENCODER_kMPEG4;
-            break;
-        case M4VIDEOEDITING_kH264:
-#ifdef M4VSS_SUPPORT_ENCODER_AVC
-            encoderType = M4ENCODER_kH264;
-        break;
-#endif
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-                 "Video encoder type not supported");
-            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
-    pC->pCurrentVideoEncoderExternalAPI = pC->pVideoEncoderExternalAPITable[encoderType];
-    pC->pCurrentVideoEncoderUserData = pC->pVideoEncoderUserDataTable[encoderType];
-
-    if (pC->pVideoEncoderGlobalFcts == M4OSA_NULL)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-             "Video encoder type not supported");
-        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_setCurrentAudioEncoder()
- * @brief    Set an audio encoder
- * @param    context            (IN/OUT) MCS context.
- * @param    MediaType        (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentAudioEncoder(
-                                M4MCS_Context pContext,
-                                M4VIDEOEDITING_AudioFormat mediaType)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4ENCODER_AudioFormat encoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    switch (mediaType)
-    {
-        case M4VIDEOEDITING_kAMR_NB:
-            encoderType = M4ENCODER_kAMRNB;
-            break;
-        case M4VIDEOEDITING_kAAC:
-            encoderType = M4ENCODER_kAAC;
-            break;
-        case M4VIDEOEDITING_kMP3:
-            encoderType = M4ENCODER_kMP3;
-            break;
-//EVRC
-//        case M4VIDEOEDITING_kEVRC:
-//            encoderType = M4ENCODER_kEVRC;
-//            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-                 "Audio encoder type not supported");
-            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
-    pC->pCurrentAudioEncoderUserData = pC->pAudioEncoderUserDataTable[encoderType];
-
-    if (pC->pAudioEncoderGlobalFcts == M4OSA_NULL)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-             "Audio encoder type not supported");
-        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_setCurrentReader()
- * @brief    Set current reader
- * @param    pContext            (IN/OUT) MCS context.
- * @param    mediaType            (IN) Media type.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentReader( M4MCS_Context pContext,
-                                    M4VIDEOEDITING_FileType mediaType)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4READER_MediaType readerType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    switch (mediaType)
-    {
-        case M4VIDEOEDITING_kFileType_3GPP:
-        case M4VIDEOEDITING_kFileType_MP4:
-        case M4VIDEOEDITING_kFileType_M4V:
-            readerType = M4READER_kMediaType3GPP;
-            break;
-        case M4VIDEOEDITING_kFileType_AMR:
-            readerType = M4READER_kMediaTypeAMR;
-            break;
-        case M4VIDEOEDITING_kFileType_MP3:
-            readerType = M4READER_kMediaTypeMP3;
-            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-                 "Reader type not supported");
-            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->m_pReader       = pC->m_pReaderGlobalItTable[readerType];
-    pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
-
-    if (pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-             "Reader type not supported");
-        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_setCurrentVideoDecoder()
- * @brief    Set a video decoder
- * @param    pContext            (IN/OUT) MCS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentVideoDecoder(   M4MCS_Context pContext,
-                                            M4_StreamType mediaType)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4DECODER_VideoType decoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    switch (mediaType)
-    {
-        case M4DA_StreamTypeVideoMpeg4:
-        case M4DA_StreamTypeVideoH263:
-            decoderType = M4DECODER_kVideoTypeMPEG4;
-            break;
-        case M4DA_StreamTypeVideoMpeg4Avc:
-            decoderType = M4DECODER_kVideoTypeAVC;
-            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-                 "Video decoder type not supported");
-            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-    pC->m_pCurrentVideoDecoderUserData =
-            pC->m_pVideoDecoderUserDataTable[decoderType];
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-    if (pC->m_pVideoDecoder == M4OSA_NULL)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-             "Video decoder type not supported");
-        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4MCS_setCurrentAudioDecoder()
- * @brief    Set an audio decoder
- * @param    context            (IN/OUT) MCS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR   M4MCS_setCurrentAudioDecoder(   M4MCS_Context pContext,
-                                            M4_StreamType mediaType)
-{
-    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
-    M4AD_Type decoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
-
-    switch (mediaType)
-    {
-        case M4DA_StreamTypeAudioAmrNarrowBand:
-            decoderType = M4AD_kTypeAMRNB;
-            break;
-        case M4DA_StreamTypeAudioAac:
-        case M4DA_StreamTypeAudioAacADTS:
-        case M4DA_StreamTypeAudioAacADIF:
-            decoderType = M4AD_kTypeAAC;
-            break;
-        case M4DA_StreamTypeAudioMp3:
-            decoderType = M4AD_kTypeMP3;
-            break;
-//EVRC
-//        case M4DA_StreamTypeAudioEvrc:
-//            decoderType = M4AD_kTypeEVRC;
-//            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-                 "Audio decoder type not supported");
-            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
-    pC->m_pCurrentAudioDecoderUserData =
-                    pC->m_pAudioDecoderUserDataTable[decoderType];
-
-    if (pC->m_pAudioDecoder == M4OSA_NULL)
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
-             "Audio decoder type not supported");
-        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
-    }
-
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c b/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c
deleted file mode 100755
index 631ca87..0000000
--- a/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4MCS_MediaAndCodecSubscription.c
- * @brief  Media readers and codecs subscription
- * @note   This file implements the subscription of supported media
- *         readers and decoders for the MCS. Potential support can
- *         be activated or de-activated
- *         using compilation flags set in the projects settings.
- ************************************************************************
- */
-
-/**
- ********************************************************************
- * Includes
- ********************************************************************
- */
-#include "NXPSW_CompilerSwitches.h"
-
-
-#include "M4OSA_Debug.h"
-#include "M4MCS_InternalTypes.h"                /**< Include for MCS specific types */
-#include "M4MCS_InternalFunctions.h"            /**< Registration module */
-
-/* _______________________ */
-/*|                       |*/
-/*|  reader subscription  |*/
-/*|_______________________|*/
-
-/* Reader registration : at least one reader must be defined */
-#ifndef M4VSS_SUPPORT_READER_3GP
-#ifndef M4VSS_SUPPORT_READER_AMR
-#ifndef M4VSS_SUPPORT_READER_MP3
-#error "no reader registered"
-#endif /* M4VSS_SUPPORT_READER_MP3 */
-#endif /* M4VSS_SUPPORT_READER_AMR */
-#endif /* M4VSS_SUPPORT_READER_3GP */
-
-/* Include files for each reader to subscribe */
-#ifdef M4VSS_SUPPORT_READER_3GP
-#include "VideoEditor3gpReader.h"
-#endif
-
-#ifdef M4VSS_SUPPORT_READER_AMR
-#include "M4READER_Amr.h"
-#endif
-#ifdef M4VSS_SUPPORT_READER_MP3
-#include "VideoEditorMp3Reader.h"
-#endif
-
-/* ______________________________ */
-/*|                              |*/
-/*|  video decoder subscription  |*/
-/*|______________________________|*/
-
-#include "VideoEditorAudioDecoder.h"
-#include "VideoEditorVideoDecoder.h"
-
-
-
-/* _______________________ */
-/*|                       |*/
-/*|  writer subscription  |*/
-/*|_______________________|*/
-
-/* Writer registration : at least one writer must be defined */
-#ifndef M4VSS_SUPPORT_WRITER_AMR
-#ifndef M4VSS_SUPPORT_WRITER_3GPP
-#ifndef M4VSS_SUPPORT_WRITER_PCM
-#ifndef M4VSS_SUPPORT_WRITER_MP3
-#error "no writer registered"
-#endif /* M4VSS_SUPPORT_WRITER_MP3 */
-#endif /* M4VSS_SUPPORT_WRITER_PCM */
-#endif /* M4VSS_SUPPORT_WRITER_3GPP */
-#endif /* M4VSS_SUPPORT_WRITER_AMR */
-
-/* Include files for each writer to subscribe */
-#ifdef M4VSS_SUPPORT_WRITER_AMR
-extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
-                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
-                                             M4WRITER_DataInterface** SrcDataInterface);
-#endif
-#ifdef M4VSS_SUPPORT_WRITER_3GPP
-extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
-                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
-                                             M4WRITER_DataInterface** SrcDataInterface);
-#endif
-#ifdef M4VSS_SUPPORT_WRITER_PCM
-extern M4OSA_ERR M4WRITER_PCM_getInterfaces( M4WRITER_OutputFileType* Type,
-                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
-                                             M4WRITER_DataInterface** SrcDataInterface);
-#endif
-#ifdef M4VSS_SUPPORT_WRITER_MP3
-extern M4OSA_ERR M4WRITER_MP3_getInterfaces( M4WRITER_OutputFileType* Type,
-                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
-                                             M4WRITER_DataInterface** SrcDataInterface);
-#endif
-
-/* ______________________________ */
-/*|                              |*/
-/*|  video encoder subscription  |*/
-/*|______________________________|*/
-#include "VideoEditorAudioEncoder.h"
-#include "VideoEditorVideoEncoder.h"
-
-
-/* Include files for each video encoder to subscribe */
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-//#include "M4MP4E_interface.h"
-#endif
-
-
-#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) \
-    if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4MCS_SubscribeMediaAndCodec(M4MCS_Context pContext);
- * @brief    This function registers the reader, decoders, writers and encoders
- *          in the MCS.
- * @note
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext is NULL
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext)
-{
-    M4OSA_ERR                   err = M4NO_ERROR;
-
-    M4READER_MediaType          readerMediaType;
-    M4READER_GlobalInterface*   pReaderGlobalInterface;
-    M4READER_DataInterface*     pReaderDataInterface;
-
-    M4WRITER_OutputFileType     writerMediaType;
-    M4WRITER_GlobalInterface*   pWriterGlobalInterface;
-    M4WRITER_DataInterface*     pWriterDataInterface;
-
-    M4AD_Type                   audioDecoderType;
-    M4ENCODER_AudioFormat       audioCodecType;
-    M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
-    M4AD_Interface*             pAudioDecoderInterface;
-
-    M4DECODER_VideoType         videoDecoderType;
-    M4ENCODER_Format            videoCodecType;
-    M4ENCODER_GlobalInterface*  pVideoCodecInterface;
-    M4DECODER_VideoInterface*   pVideoDecoderInterface;
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
-
-    /* _______________________ */
-    /*|                       |*/
-    /*|  reader subscription  |*/
-    /*|_______________________|*/
-
-    /* --- 3GP --- */
-
-#ifdef M4VSS_SUPPORT_READER_3GP
-    err = VideoEditor3gpReader_getInterface(&readerMediaType,
-                                            &pReaderGlobalInterface,
-                                            &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerReader( pContext, readerMediaType,
-                                pReaderGlobalInterface,
-                                pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register 3GP reader");
-#endif /* M4VSS_SUPPORT_READER_3GP */
-
-    /* --- AMR --- */
-
-#ifdef M4VSS_SUPPORT_READER_AMR
-    err = M4READER_AMR_getInterfaces(   &readerMediaType,
-                                        &pReaderGlobalInterface,
-                                        &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerReader( pContext, readerMediaType,
-                                pReaderGlobalInterface,
-                                pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register AMR reader");
-#endif /* M4VSS_SUPPORT_READER_AMR */
-
-    /* --- MP3 --- */
-
-#ifdef M4VSS_SUPPORT_READER_MP3
-
-    err = VideoEditorMp3Reader_getInterface(&readerMediaType,
-                                            &pReaderGlobalInterface,
-                                            &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerReader( pContext, readerMediaType,
-                                pReaderGlobalInterface,
-                                pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register MP3 reader");
-#endif /* M4VSS_SUPPORT_READER_MP3 */
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  video decoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- MPEG4 & H263 --- */
-
-#ifdef M4VSS_SUPPORT_VIDEC_3GP
-
-    err = VideoEditorVideoDecoder_getInterface_MPEG4( &videoDecoderType,
-                                (M4OSA_Void *)&pVideoDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerVideoDecoder(   pContext, videoDecoderType,
-                                        pVideoDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register MPEG4 decoder");
-#endif /* M4VSS_SUPPORT_VIDEC_3GP */
-
-
-#ifdef M4VSS_SUPPORT_VIDEO_AVC
-
-    err = VideoEditorVideoDecoder_getInterface_H264( &videoDecoderType,
-                                (M4OSA_Void *)&pVideoDecoderInterface);
-
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4DECODER_AVC interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerVideoDecoder(   pContext, videoDecoderType,
-                                        pVideoDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register AVC decoder");
-#endif /* M4VSS_SUPPORT_VIDEO_AVC */
-
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  audio decoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- AMRNB --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
-    err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType,
-                                                &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AD PHILIPS AMRNB interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerAudioDecoder(   pContext, audioDecoderType,
-                                        pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register PHILIPS AMRNB decoder");
-#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
-
-    /* --- AAC --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_AAC
-
-    err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType,
-                                            &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AD PHILIPS AAC interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerAudioDecoder(   pContext, audioDecoderType,
-                                        pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register PHILIPS AAC decoder");
-#endif /* M4VSS_SUPPORT_AUDEC_AAC */
-
-    /* --- MP3 --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_MP3
-
-    err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType,
-                                            &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AD PHILIPS MP3 interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerAudioDecoder(   pContext, audioDecoderType,
-                                        pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register PHILIPS MP3 decoder");
-#endif  /* M4VSS_SUPPORT_AUDEC_MP3 */
-
-    /* --- EVRC --- */
-
-
-    /* _______________________ */
-    /*|                       |*/
-    /*|  writer subscription  |*/
-    /*|_______________________|*/
-
-    /* --- PCM --- */
-
-
-    /* --- 3GPP --- */
-
-#ifdef M4VSS_SUPPORT_WRITER_3GPP
-    /* retrieves the 3GPP writer media type and pointer to functions*/
-    err = M4WRITER_3GP_getInterfaces(   &writerMediaType,
-                                        &pWriterGlobalInterface,
-                                        &pWriterDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerWriter( pContext, writerMediaType,
-                                pWriterGlobalInterface,
-                                pWriterDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register 3GPP writer");
-#endif /* M4VSS_SUPPORT_WRITER_3GPP */
-
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  video encoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- MPEG4 --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-       /* retrieves the MPEG4 encoder type and pointer to functions*/
-    err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType,
-                                                &pVideoCodecInterface,
-                                                M4ENCODER_OPEN_ADVANCED);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerVideoEncoder(   pContext, videoCodecType,
-                                        pVideoCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register video MPEG4 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
-
-    /* --- H263 --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-    /* retrieves the H263 encoder type and pointer to functions*/
-    err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType,
-                                                &pVideoCodecInterface,
-                                                M4ENCODER_OPEN_ADVANCED);
-
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerVideoEncoder( pContext, videoCodecType,
-                                      pVideoCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register video H263 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
-
-#ifdef M4VSS_SUPPORT_ENCODER_AVC
-    /* retrieves the H263 encoder type and pointer to functions*/
-    err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType,
-                                                &pVideoCodecInterface,
-                                                M4ENCODER_OPEN_ADVANCED);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4H264E interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register video H264 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_AVC */
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  audio encoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- AMR --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_AMR
-       /* retrieves the AMR encoder type and pointer to functions*/
-    err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType,
-                                                &pAudioCodecInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AMR interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerAudioEncoder(   pContext, audioCodecType,
-                                        pAudioCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register audio AMR encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_AMR */
-
-    /* --- AAC --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_AAC
-    /* retrieves the AAC encoder type and pointer to functions*/
-    err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType,
-                                                &pAudioCodecInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AAC interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerAudioEncoder(   pContext, audioCodecType,
-                                        pAudioCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register audio AAC encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_AAC */
-
-
-
-    /* --- MP3 --- */
-#ifdef M4VSS_SUPPORT_ENCODER_MP3
-    /* retrieves the MP3 encoder type and pointer to functions*/
-    err = VideoEditorAudioEncoder_getInterface_MP3(&audioCodecType,
-                                                &pAudioCodecInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4MP3E interface allocation error");
-        return err;
-    }
-    err = M4MCS_registerAudioEncoder( pContext, audioCodecType,
-                                      pAudioCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-         "M4MCS_subscribeMediaAndCodec: can't register audio MP3 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_MP3 */
-
-    return err;
-}
-
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c b/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c
deleted file mode 100755
index 749f68e..0000000
--- a/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c
+++ /dev/null
@@ -1,455 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *************************************************************************
- * @file   M4MCS_VideoPreProcessing.c
- * @brief  MCS implementation
- * @note   This file implements the encoder callback of the MCS.
- *************************************************************************
- **/
-
-/**
- ********************************************************************
- * Includes
- ********************************************************************
- */
-/* OSAL headers */
-#include "M4OSA_Memory.h"       /* OSAL memory management */
-#include "M4OSA_Debug.h"        /* OSAL debug management */
-
-
-/* Core headers */
-#include "M4MCS_InternalTypes.h"
-#include "M4MCS_ErrorCodes.h"
-
-/**
- * Video preprocessing interface definition */
-#include "M4VPP_API.h"
-
-/**
- * Video filters */
-#include "M4VIFI_FiltersAPI.h" /**< for M4VIFI_ResizeBilinearYUV420toYUV420() */
-
-#ifndef M4MCS_AUDIOONLY
-#include "M4AIR_API.h"
-#endif /*M4MCS_AUDIOONLY*/
-/**/
-
-
-
-
-/*
- ******************************************************************************
- * M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
- *                               M4VIFI_ImagePlane* pPlaneOut)
- * @brief    Do the video rendering and the resize (if needed)
- * @note    It is called by the video encoder
- * @param    pContext    (IN) VPP context, which actually is the MCS internal context in our case
- * @param    pPlaneIn    (IN) Contains the image
- * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the output
- *                                  YUV420 image
- * @return    M4NO_ERROR:    No error
- * @return    M4MCS_ERR_VIDEO_DECODE_ERROR: the video decoding failed
- * @return    M4MCS_ERR_RESIZE_ERROR: the resizing failed
- * @return    Any error returned by an underlaying module
- ******************************************************************************
- */
-M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
-                             M4VIFI_ImagePlane* pPlaneOut)
-{
-    M4OSA_ERR        err = M4NO_ERROR;
-
-/* This part is used only if video codecs are compiled*/
-#ifndef M4MCS_AUDIOONLY
-    /**
-     * The VPP context is actually the MCS context! */
-    M4MCS_InternalContext *pC = (M4MCS_InternalContext*)(pContext);
-
-    M4_MediaTime mtCts = pC->dViDecCurrentCts;
-
-    /**
-     * When Closing after an error occured, it may happen that pReaderVideoAU->m_dataAddress has
-     * not been allocated yet. When closing in pause mode, the decoder can be null.
-     * We don't want an error to be returned because it would interrupt the close process and
-     * thus some resources would be locked. So we return M4NO_ERROR.
-     */
-    /* Initialize to black plane the output plane if the media rendering
-     is black borders */
-    if(pC->MediaRendering == M4MCS_kBlackBorders)
-    {
-        memset((void *)pPlaneOut[0].pac_data,Y_PLANE_BORDER_VALUE,
-            (pPlaneOut[0].u_height*pPlaneOut[0].u_stride));
-        memset((void *)pPlaneOut[1].pac_data,U_PLANE_BORDER_VALUE,
-            (pPlaneOut[1].u_height*pPlaneOut[1].u_stride));
-        memset((void *)pPlaneOut[2].pac_data,V_PLANE_BORDER_VALUE,
-            (pPlaneOut[2].u_height*pPlaneOut[2].u_stride));
-    }
-    else if ((M4OSA_NULL == pC->ReaderVideoAU.m_dataAddress) ||
-             (M4OSA_NULL == pC->pViDecCtxt))
-    {
-        /**
-         * We must fill the input of the encoder with a dummy image, because
-         * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
-        memset((void *)pPlaneOut[0].pac_data,0,
-             pPlaneOut[0].u_stride * pPlaneOut[0].u_height);
-        memset((void *)pPlaneOut[1].pac_data,0,
-             pPlaneOut[1].u_stride * pPlaneOut[1].u_height);
-        memset((void *)pPlaneOut[2].pac_data,0,
-             pPlaneOut[2].u_stride * pPlaneOut[2].u_height);
-
-        M4OSA_TRACE1_0("M4MCS_intApplyVPP: pReaderVideoAU->m_dataAddress is M4OSA_NULL,\
-                       returning M4NO_ERROR");
-        return M4NO_ERROR;
-    }
-
-    if(pC->isRenderDup == M4OSA_FALSE)
-    {
-        /**
-         *    m_pPreResizeFrame different than M4OSA_NULL means that resizing is needed */
-        if (M4OSA_NULL != pC->pPreResizeFrame)
-        {
-            /** FB 2008/10/20:
-            Used for cropping and black borders*/
-            M4AIR_Params Params;
-
-            M4OSA_TRACE3_0("M4MCS_intApplyVPP: Need to resize");
-            err = pC->m_pVideoDecoder->m_pFctRender(pC->pViDecCtxt, &mtCts,
-                pC->pPreResizeFrame, M4OSA_TRUE);
-            if (M4NO_ERROR != err)
-            {
-                M4OSA_TRACE1_1("M4MCS_intApplyVPP: m_pFctRender returns 0x%x!", err);
-                return err;
-            }
-
-            if(pC->MediaRendering == M4MCS_kResizing)
-            {
-                /*
-                 * Call the resize filter. From the intermediate frame to the encoder
-                 * image plane
-                 */
-                err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL,
-                    pC->pPreResizeFrame, pPlaneOut);
-                if (M4NO_ERROR != err)
-                {
-                    M4OSA_TRACE1_1("M4MCS_intApplyVPP: M4ViFilResizeBilinearYUV420toYUV420\
-                                   returns 0x%x!", err);
-                    return err;
-                }
-            }
-            else
-            {
-                M4VIFI_ImagePlane pImagePlanesTemp[3];
-                M4VIFI_ImagePlane* pPlaneTemp;
-                M4OSA_UInt8* pOutPlaneY = pPlaneOut[0].pac_data +
-                                          pPlaneOut[0].u_topleft;
-                M4OSA_UInt8* pOutPlaneU = pPlaneOut[1].pac_data +
-                                          pPlaneOut[1].u_topleft;
-                M4OSA_UInt8* pOutPlaneV = pPlaneOut[2].pac_data +
-                                          pPlaneOut[2].u_topleft;
-                M4OSA_UInt8* pInPlaneY = M4OSA_NULL;
-                M4OSA_UInt8* pInPlaneU = M4OSA_NULL;
-                M4OSA_UInt8* pInPlaneV = M4OSA_NULL;
-                M4OSA_UInt32 i = 0;
-
-                /*FB 2008/10/20: to keep media aspect ratio*/
-                /*Initialize AIR Params*/
-                Params.m_inputCoord.m_x = 0;
-                Params.m_inputCoord.m_y = 0;
-                Params.m_inputSize.m_height = pC->pPreResizeFrame->u_height;
-                Params.m_inputSize.m_width = pC->pPreResizeFrame->u_width;
-                Params.m_outputSize.m_width = pPlaneOut->u_width;
-                Params.m_outputSize.m_height = pPlaneOut->u_height;
-                Params.m_bOutputStripe = M4OSA_FALSE;
-                Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-
-                /**
-                Media rendering: Black borders*/
-                if(pC->MediaRendering == M4MCS_kBlackBorders)
-                {
-                    pImagePlanesTemp[0].u_width = pPlaneOut[0].u_width;
-                    pImagePlanesTemp[0].u_height = pPlaneOut[0].u_height;
-                    pImagePlanesTemp[0].u_stride = pPlaneOut[0].u_width;
-                    pImagePlanesTemp[0].u_topleft = 0;
-
-                    pImagePlanesTemp[1].u_width = pPlaneOut[1].u_width;
-                    pImagePlanesTemp[1].u_height = pPlaneOut[1].u_height;
-                    pImagePlanesTemp[1].u_stride = pPlaneOut[1].u_width;
-                    pImagePlanesTemp[1].u_topleft = 0;
-
-                    pImagePlanesTemp[2].u_width = pPlaneOut[2].u_width;
-                    pImagePlanesTemp[2].u_height = pPlaneOut[2].u_height;
-                    pImagePlanesTemp[2].u_stride = pPlaneOut[2].u_width;
-                    pImagePlanesTemp[2].u_topleft = 0;
-
-                    /* Allocates plan in local image plane structure */
-                    pImagePlanesTemp[0].pac_data =
-                        (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[0]\
-                        .u_width * pImagePlanesTemp[0].u_height, M4VS,
-                        (M4OSA_Char *)"M4xVSS_PictureCallbackFct: temporary plane bufferY") ;
-                    if(pImagePlanesTemp[0].pac_data == M4OSA_NULL)
-                    {
-                        M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
-                        return M4ERR_ALLOC;
-                    }
-                    pImagePlanesTemp[1].pac_data =
-                        (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[1]\
-                        .u_width * pImagePlanesTemp[1].u_height, M4VS,
-                        (M4OSA_Char *)"M4xVSS_PictureCallbackFct: temporary plane bufferU") ;
-                    if(pImagePlanesTemp[1].pac_data == M4OSA_NULL)
-                    {
-                        M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
-                        return M4ERR_ALLOC;
-                    }
-                    pImagePlanesTemp[2].pac_data =
-                        (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[2]\
-                        .u_width * pImagePlanesTemp[2].u_height,
-                        M4VS,(M4OSA_Char *)"M4xVSS_PictureCallbackFct: temporary plane bufferV") ;
-                    if(pImagePlanesTemp[2].pac_data == M4OSA_NULL)
-                    {
-                        M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
-                        return M4ERR_ALLOC;
-                    }
-
-                    pInPlaneY = pImagePlanesTemp[0].pac_data ;
-                    pInPlaneU = pImagePlanesTemp[1].pac_data ;
-                    pInPlaneV = pImagePlanesTemp[2].pac_data ;
-
-                    memset((void *)pImagePlanesTemp[0].pac_data,Y_PLANE_BORDER_VALUE,
-                        (pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride));
-                    memset((void *)pImagePlanesTemp[1].pac_data,U_PLANE_BORDER_VALUE,
-                        (pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride));
-                    memset((void *)pImagePlanesTemp[2].pac_data,V_PLANE_BORDER_VALUE,
-                        (pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride));
-
-                    if((M4OSA_UInt32)((pC->pPreResizeFrame->u_height * pPlaneOut->u_width)\
-                         /pC->pPreResizeFrame->u_width) <= pPlaneOut->u_height)
-                         //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
-                    {
-                        /*it is height so black borders will be on the top and on the bottom side*/
-                        Params.m_outputSize.m_width = pPlaneOut->u_width;
-                        Params.m_outputSize.m_height =
-                             (M4OSA_UInt32)
-                             ((pC->pPreResizeFrame->u_height * pPlaneOut->u_width)\
-                             /pC->pPreResizeFrame->u_width);
-                        /*number of lines at the top*/
-                        pImagePlanesTemp[0].u_topleft =
-                             (M4MCS_ABS((M4OSA_Int32)
-                             (pImagePlanesTemp[0].u_height\
-                             -Params.m_outputSize.m_height)>>1)) *
-                             pImagePlanesTemp[0].u_stride;
-                        pImagePlanesTemp[0].u_height = Params.m_outputSize.m_height;
-                        pImagePlanesTemp[1].u_topleft =
-                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height\
-                             -(Params.m_outputSize.m_height>>1)))>>1)\
-                             * pImagePlanesTemp[1].u_stride;
-                        pImagePlanesTemp[1].u_height = Params.m_outputSize.m_height>>1;
-                        pImagePlanesTemp[2].u_topleft =
-                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height\
-                             -(Params.m_outputSize.m_height>>1)))>>1)\
-                             * pImagePlanesTemp[2].u_stride;
-                        pImagePlanesTemp[2].u_height = Params.m_outputSize.m_height>>1;
-                    }
-                    else
-                    {
-                        /*it is width so black borders will be on the left and right side*/
-                        Params.m_outputSize.m_height = pPlaneOut->u_height;
-                        Params.m_outputSize.m_width =
-                             (M4OSA_UInt32)((pC->pPreResizeFrame->u_width
-                             * pPlaneOut->u_height)\
-                             /pC->pPreResizeFrame->u_height);
-
-                        pImagePlanesTemp[0].u_topleft =
-                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width-\
-                                Params.m_outputSize.m_width)>>1));
-                        pImagePlanesTemp[0].u_width = Params.m_outputSize.m_width;
-                        pImagePlanesTemp[1].u_topleft =
-                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width-\
-                                (Params.m_outputSize.m_width>>1)))>>1);
-                        pImagePlanesTemp[1].u_width = Params.m_outputSize.m_width>>1;
-                        pImagePlanesTemp[2].u_topleft =
-                            (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width-\
-                                (Params.m_outputSize.m_width>>1)))>>1);
-                        pImagePlanesTemp[2].u_width = Params.m_outputSize.m_width>>1;
-                    }
-
-                    /*Width and height have to be even*/
-                    Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
-                    Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
-                    Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
-                    Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
-                    pImagePlanesTemp[0].u_width = (pImagePlanesTemp[0].u_width>>1)<<1;
-                    pImagePlanesTemp[1].u_width = (pImagePlanesTemp[1].u_width>>1)<<1;
-                    pImagePlanesTemp[2].u_width = (pImagePlanesTemp[2].u_width>>1)<<1;
-                    pImagePlanesTemp[0].u_height = (pImagePlanesTemp[0].u_height>>1)<<1;
-                    pImagePlanesTemp[1].u_height = (pImagePlanesTemp[1].u_height>>1)<<1;
-                    pImagePlanesTemp[2].u_height = (pImagePlanesTemp[2].u_height>>1)<<1;
-
-                    /*Check that values are coherent*/
-                    if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
-                    {
-                        Params.m_inputSize.m_width = Params.m_outputSize.m_width;
-                    }
-                    else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
-                    {
-                        Params.m_inputSize.m_height = Params.m_outputSize.m_height;
-                    }
-                    pPlaneTemp = pImagePlanesTemp;
-                }
-
-                /**
-                Media rendering: Cropping*/
-                if(pC->MediaRendering == M4MCS_kCropping)
-                {
-                    Params.m_outputSize.m_height = pPlaneOut->u_height;
-                    Params.m_outputSize.m_width = pPlaneOut->u_width;
-                    if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
-                         /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
-                    {
-                        /*height will be cropped*/
-                        Params.m_inputSize.m_height =
-                             (M4OSA_UInt32)((Params.m_outputSize.m_height \
-                             * Params.m_inputSize.m_width) /
-                             Params.m_outputSize.m_width);
-                        Params.m_inputSize.m_height =
-                            (Params.m_inputSize.m_height>>1)<<1;
-                        Params.m_inputCoord.m_y =
-                            (M4OSA_Int32)((M4OSA_Int32)
-                            ((pC->pPreResizeFrame->u_height\
-                            - Params.m_inputSize.m_height))>>1);
-                    }
-                    else
-                    {
-                        /*width will be cropped*/
-                        Params.m_inputSize.m_width =
-                             (M4OSA_UInt32)((Params.m_outputSize.m_width\
-                                 * Params.m_inputSize.m_height) /
-                                 Params.m_outputSize.m_height);
-                        Params.m_inputSize.m_width =
-                             (Params.m_inputSize.m_width>>1)<<1;
-                        Params.m_inputCoord.m_x =
-                            (M4OSA_Int32)((M4OSA_Int32)
-                            ((pC->pPreResizeFrame->u_width\
-                            - Params.m_inputSize.m_width))>>1);
-                    }
-                    pPlaneTemp = pPlaneOut;
-                }
-                /**
-                 * Call AIR functions */
-                if(M4OSA_NULL == pC->m_air_context)
-                {
-                    err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
-                    if(err != M4NO_ERROR)
-                    {
-                        M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
-                         Error when initializing AIR: 0x%x", err);
-                        return err;
-                    }
-                }
-
-                err = M4AIR_configure(pC->m_air_context, &Params);
-                if(err != M4NO_ERROR)
-                {
-                    M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
-                     Error when configuring AIR: 0x%x", err);
-                    M4AIR_cleanUp(pC->m_air_context);
-                    return err;
-                }
-
-                err = M4AIR_get(pC->m_air_context, pC->pPreResizeFrame,
-                                pPlaneTemp);
-                if(err != M4NO_ERROR)
-                {
-                    M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
-                     Error when getting AIR plane: 0x%x", err);
-                    M4AIR_cleanUp(pC->m_air_context);
-                    return err;
-                }
-
-                if(pC->MediaRendering == M4MCS_kBlackBorders)
-                {
-                    for(i=0; i<pPlaneOut[0].u_height; i++)
-                    {
-                        memcpy(   (void *)pOutPlaneY,
-                                        (void *)pInPlaneY,
-                                        pPlaneOut[0].u_width);
-                        pInPlaneY += pPlaneOut[0].u_width;
-                        pOutPlaneY += pPlaneOut[0].u_stride;
-                    }
-                    for(i=0; i<pPlaneOut[1].u_height; i++)
-                    {
-                        memcpy(   (void *)pOutPlaneU,
-                                        (void *)pInPlaneU,
-                                        pPlaneOut[1].u_width);
-                        pInPlaneU += pPlaneOut[1].u_width;
-                        pOutPlaneU += pPlaneOut[1].u_stride;
-                    }
-                    for(i=0; i<pPlaneOut[2].u_height; i++)
-                    {
-                        memcpy(   (void *)pOutPlaneV,
-                                        (void *)pInPlaneV,
-                                        pPlaneOut[2].u_width);
-                        pInPlaneV += pPlaneOut[2].u_width;
-                        pOutPlaneV += pPlaneOut[2].u_stride;
-                    }
-
-                    for(i=0; i<3; i++)
-                    {
-                        if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
-                        {
-                            free(
-                                        pImagePlanesTemp[i].pac_data);
-                            pImagePlanesTemp[i].pac_data = M4OSA_NULL;
-                        }
-                    }
-                }
-            }
-        }
-        else
-        {
-            M4OSA_TRACE3_0("M4MCS_intApplyVPP: Don't need resizing");
-            err = pC->m_pVideoDecoder->m_pFctRender(pC->pViDecCtxt,
-                                                    &mtCts, pPlaneOut,
-                                                    M4OSA_TRUE);
-            if (M4NO_ERROR != err)
-            {
-                M4OSA_TRACE1_1("M4MCS_intApplyVPP: m_pFctRender returns 0x%x!", err);
-                return err;
-            }
-        }
-        pC->lastDecodedPlane = pPlaneOut;
-    }
-    else
-    {
-        /* Copy last decoded plane to output plane */
-        memcpy((void *)pPlaneOut[0].pac_data,
-                        (void *)pC->lastDecodedPlane[0].pac_data,
-                         (pPlaneOut[0].u_height * pPlaneOut[0].u_width));
-        memcpy((void *)pPlaneOut[1].pac_data,
-                        (void *)pC->lastDecodedPlane[1].pac_data,
-                          (pPlaneOut[1].u_height * pPlaneOut[1].u_width));
-        memcpy((void *)pPlaneOut[2].pac_data,
-                        (void *)pC->lastDecodedPlane[2].pac_data,
-                          (pPlaneOut[2].u_height * pPlaneOut[2].u_width));
-        pC->lastDecodedPlane = pPlaneOut;
-    }
-
-
-#endif /*M4MCS_AUDIOONLY*/
-    M4OSA_TRACE3_0("M4MCS_intApplyVPP: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-
diff --git a/libvideoeditor/vss/mcs/src/MODULE_LICENSE_APACHE2 b/libvideoeditor/vss/mcs/src/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/vss/mcs/src/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/vss/mcs/src/NOTICE b/libvideoeditor/vss/mcs/src/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/vss/mcs/src/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/libvideoeditor/vss/src/Android.mk b/libvideoeditor/vss/src/Android.mk
deleted file mode 100755
index 47627ec..0000000
--- a/libvideoeditor/vss/src/Android.mk
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libvss
-#
-include $(CLEAR_VARS)
-
-LOCAL_MODULE:= libvideoeditor_core
-
-LOCAL_SRC_FILES:=          \
-      M4PTO3GPP_API.c \
-      M4PTO3GPP_VideoPreProcessing.c \
-      M4VIFI_xVSS_RGB565toYUV420.c \
-      M4xVSS_API.c \
-      M4xVSS_internal.c \
-      M4VSS3GPP_AudioMixing.c \
-      M4VSS3GPP_Clip.c \
-      M4VSS3GPP_ClipAnalysis.c \
-      M4VSS3GPP_Codecs.c \
-      M4VSS3GPP_Edit.c \
-      M4VSS3GPP_EditAudio.c \
-      M4VSS3GPP_EditVideo.c \
-      M4VSS3GPP_MediaAndCodecSubscription.c \
-      M4ChannelConverter.c \
-      M4VD_EXTERNAL_BitstreamParser.c \
-      M4AIR_API.c \
-      M4READER_Pcm.c \
-      M4PCMR_CoreReader.c \
-      M4AD_Null.c \
-      M4AMRR_CoreReader.c \
-      M4READER_Amr.c \
-      M4VD_Tools.c \
-      VideoEditorResampler.cpp \
-      M4DECODER_Null.c
-
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES :=       \
-    libaudioresampler           \
-    libaudioutils               \
-    libbinder                   \
-    libcutils                   \
-    liblog                      \
-    libmedia                    \
-    libstagefright              \
-    libstagefright_foundation   \
-    libstagefright_omx          \
-    libutils                    \
-    libvideoeditor_osal         \
-    libvideoeditor_videofilters \
-    libvideoeditorplayer        \
-
-LOCAL_STATIC_LIBRARIES := \
-    libstagefright_color_conversion \
-    libvideoeditor_mcs \
-    libvideoeditor_stagefrightshells \
-    libvideoeditor_3gpwriter \
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
-    $(TOP)/frameworks/av/services/audioflinger \
-    $(TOP)/frameworks/native/include/media/openmax \
-    $(TOP)/system/media/audio_effects/include \
-    $(TOP)/system/media/audio_utils/include
-
-
-LOCAL_SHARED_LIBRARIES += libdl
-
-# All of the shared libraries we link against.
-LOCAL_LDLIBS := \
-    -lpthread -ldl
-
-LOCAL_CFLAGS += -Wno-multichar \
-    -DM4xVSS_RESERVED_MOOV_DISK_SPACEno \
-    -DDECODE_GIF_ON_SAVING
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/libvideoeditor/vss/src/M4AD_Null.c b/libvideoeditor/vss/src/M4AD_Null.c
deleted file mode 100755
index cd1ec73..0000000
--- a/libvideoeditor/vss/src/M4AD_Null.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
- * @file    M4AD_Null.c
- * @brief   Implementation of the MP3 decoder public interface
- * @note    This file implements a "null" audio decoder, that is a decoder
- *          that do nothing except getting AU from the reader
-*************************************************************************
-*/
-#include "M4OSA_Debug.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Debug.h"
-#include "M4TOOL_VersionInfo.h"
-#include "M4AD_Common.h"
-#include "M4AD_Null.h"
-
-#define M4AD_FORCE_16BITS
-
-/**
- ************************************************************************
- * NULL Audio Decoder version information
- ************************************************************************
-*/
-/* CHANGE_VERSION_HERE */
-#define M4AD_NULL_MAJOR    1
-#define M4AD_NULL_MINOR    1
-#define M4AD_NULL_REVISION 4
-
-/**
- ************************************************************************
- * structure    M4AD_NullContext
- * @brief        Internal null decoder context
- ************************************************************************
-*/
-typedef struct
-{
-    /**< Pointer to the stream handler provided by the user */
-    M4_AudioStreamHandler*    m_pAudioStreamhandler;
-} M4AD_NullContext;
-
-
-/**
- ************************************************************************
- * NXP MP3 decoder functions definition
- ************************************************************************
-*/
-
-/**
- ************************************************************************
- * @brief   Creates an instance of the null decoder
- * @note    Allocates the context
- *
- * @param    pContext:        (OUT)    Context of the decoder
- * @param    pStreamHandler: (IN)    Pointer to an audio stream description
- * @param    pUserData:        (IN)    Pointer to User data
- *
- * @return    M4NO_ERROR              there is no error
- * @return    M4ERR_STATE             State automaton is not applied
- * @return    M4ERR_ALLOC             a memory allocation has failed
- * @return    M4ERR_PARAMETER         at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR    M4AD_NULL_create(  M4AD_Context* pContext,
-                                M4_AudioStreamHandler *pStreamHandler,
-                                void* pUserData)
-{
-    M4AD_NullContext* pC;
-
-    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
-                "M4AD_NULL_create: invalid context pointer");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-                "M4AD_NULL_create: invalid pointer pStreamHandler");
-
-    pC = (M4AD_NullContext*)M4OSA_32bitAlignedMalloc(sizeof(M4AD_NullContext),
-                 M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_NullContext");
-    if (pC == (M4AD_NullContext*)0)
-    {
-        M4OSA_TRACE1_0("Can not allocate null decoder context");
-        return M4ERR_ALLOC;
-    }
-
-    *pContext = pC;
-
-    pC->m_pAudioStreamhandler = pStreamHandler;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief    Destroys the instance of the null decoder
- * @note     After this call the context is invalid
- *
- * @param    context:    (IN)    Context of the decoder
- *
- * @return   M4NO_ERROR            There is no error
- * @return   M4ERR_PARAMETER     The context is invalid (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR    M4AD_NULL_destroy(M4AD_Context context)
-{
-    M4AD_NullContext* pC = (M4AD_NullContext*)context;
-
-    M4OSA_DEBUG_IF1((context == M4OSA_NULL), M4ERR_PARAMETER, "M4AD_NULL_destroy: invalid context");
-
-    free(pC);
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief   Simply output the given audio data
- * @note
- *
- * @param   context:          (IN)    Context of the decoder
- * @param   pInputBuffer:     (IN/OUT)Input Data buffer. It contains at least one audio frame.
- *                                    The size of the buffer must be updated inside the function
- *                                    to reflect the size of the actually decoded data.
- *                                    (e.g. the first frame in pInputBuffer)
- * @param   pDecodedPCMBuffer: (OUT)  Output PCM buffer (decoded data).
- * @param   jumping:           (IN)   M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
- * @return    M4NO_ERROR              there is no error
- * @return    M4ERR_PARAMETER         at least one parameter is not properly set
- ************************************************************************
-*/
-M4OSA_ERR    M4AD_NULL_step(M4AD_Context context, M4AD_Buffer *pInputBuffer,
-                            M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping)
-{
-    M4AD_NullContext* pC = (M4AD_NullContext*)context;
-
-    /*The VPS sends a zero buffer at the end*/
-    if (0 == pInputBuffer->m_bufferSize)
-    {
-        return M4WAR_NO_MORE_AU;
-    }
-
-    if (pInputBuffer->m_bufferSize > pDecodedPCMBuffer->m_bufferSize)
-    {
-        return M4ERR_PARAMETER;
-    }
-#ifdef M4AD_FORCE_16BITS
-    /*if read samples are 8 bits, complete them to 16 bits*/
-    if (pC->m_pAudioStreamhandler->m_byteSampleSize == 1)
-    {
-        M4OSA_UInt32 i;
-        M4OSA_Int16  val;
-
-        for (i = 0; i < pInputBuffer->m_bufferSize; i++)
-        {
-            val = (M4OSA_Int16)((M4OSA_UInt8)(pInputBuffer->m_dataAddress[i]) - 128);
-
-            pDecodedPCMBuffer->m_dataAddress[i*2]   = (M4OSA_Int8)(val>>8);
-            pDecodedPCMBuffer->m_dataAddress[i*2+1] = (M4OSA_Int8)(val&0x00ff);
-        }
-    }
-    else
-    {
-        memcpy((void *)pDecodedPCMBuffer->m_dataAddress, (void *)pInputBuffer->m_dataAddress,
-                    pInputBuffer->m_bufferSize );
-    }
-#else /*M4AD_FORCE_16BITS*/
-    memcpy((void *)pDecodedPCMBuffer->m_dataAddress, (void *)pInputBuffer->m_dataAddress,
-                    pInputBuffer->m_bufferSize );
-#endif /*M4AD_FORCE_16BITS*/
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief   Gets the decoder version
- * @note    The version is given in a M4_VersionInfo structure
- *
- * @param   pValue:     (OUT)       Pointer to the version structure
- *
- * @return  M4NO_ERROR              there is no error
- * @return  M4ERR_PARAMETER         pVersionInfo pointer is null (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR    M4AD_NULL_getVersion(M4_VersionInfo* pVersionInfo)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_DEBUG_IF1((pVersionInfo == 0), M4ERR_PARAMETER,
-        "M4AD_NULL_getVersion: invalid pointer pVersionInfo");
-
-    /* Up until now, the null decoder version is not available */
-
-    /* CHANGE_VERSION_HERE */
-    pVersionInfo->m_major        = M4AD_NULL_MAJOR;      /*major version of the component*/
-    pVersionInfo->m_minor        = M4AD_NULL_MINOR;      /*minor version of the component*/
-    pVersionInfo->m_revision    = M4AD_NULL_REVISION;    /*revision version of the component*/
-    pVersionInfo->m_structSize=sizeof(M4_VersionInfo);
-
-    return err;
-}
-
-
-/**
- ************************************************************************
- * getInterface function definitions of NXP MP3 decoder
- ************************************************************************
-*/
-
-/**
- ************************************************************************
- * @brief Retrieves the interface implemented by the decoder
- * @param pDecoderType        : pointer on an M4AD_Type (allocated by the caller)
- *                              that will be filled with the decoder type supported by
- *                              this decoder
- * @param pDecoderInterface   : address of a pointer that will be set to the interface
- *                              implemented by this decoder. The interface is a structure
- *                              allocated by the function and must be un-allocated by the
- *                              caller.
- *
- * @return    M4NO_ERROR  if OK
- * @return    M4ERR_ALLOC if allocation failed
- ************************************************************************
-*/
-M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface)
-{
-    *pDecoderInterface = (  M4AD_Interface*)M4OSA_32bitAlignedMalloc( sizeof(M4AD_Interface),
-                            M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_Interface" );
-    if (M4OSA_NULL == *pDecoderInterface)
-    {
-        return M4ERR_ALLOC;
-    }
-
-    *pDecoderType = M4AD_kTypePCM;
-
-    (*pDecoderInterface)->m_pFctCreateAudioDec       = M4AD_NULL_create;
-    (*pDecoderInterface)->m_pFctDestroyAudioDec      = M4AD_NULL_destroy;
-    (*pDecoderInterface)->m_pFctStepAudioDec         = M4AD_NULL_step;
-    (*pDecoderInterface)->m_pFctGetVersionAudioDec   = M4AD_NULL_getVersion;
-    (*pDecoderInterface)->m_pFctStartAudioDec        = M4OSA_NULL;
-    (*pDecoderInterface)->m_pFctResetAudioDec        = M4OSA_NULL;
-    (*pDecoderInterface)->m_pFctSetOptionAudioDec    = M4OSA_NULL;
-    (*pDecoderInterface)->m_pFctGetOptionAudioDec    = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/src/M4AIR_API.c b/libvideoeditor/vss/src/M4AIR_API.c
deleted file mode 100755
index 62897b0..0000000
--- a/libvideoeditor/vss/src/M4AIR_API.c
+++ /dev/null
@@ -1,968 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *************************************************************************
- * @file   M4AIR_API.c
- * @brief  Area of Interest Resizer  API
- *************************************************************************
- */
-
-#define M4AIR_YUV420_FORMAT_SUPPORTED
-#define M4AIR_YUV420A_FORMAT_SUPPORTED
-
-/************************* COMPILATION CHECKS ***************************/
-#ifndef M4AIR_YUV420_FORMAT_SUPPORTED
-#ifndef M4AIR_BGR565_FORMAT_SUPPORTED
-#ifndef M4AIR_RGB565_FORMAT_SUPPORTED
-#ifndef M4AIR_BGR888_FORMAT_SUPPORTED
-#ifndef M4AIR_RGB888_FORMAT_SUPPORTED
-#ifndef M4AIR_JPG_FORMAT_SUPPORTED
-
-#error "Please define at least one input format for the AIR component"
-
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-
-/******************************* INCLUDES *******************************/
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_CoreID.h"
-#include "M4OSA_Mutex.h"
-#include "M4OSA_Memory.h"
-#include "M4VIFI_FiltersAPI.h"
-#include "M4AIR_API.h"
-
-/************************ M4AIR INTERNAL TYPES DEFINITIONS ***********************/
-
-/**
- ******************************************************************************
- * enum         M4AIR_States
- * @brief       The following enumeration defines the internal states of the AIR.
- ******************************************************************************
- */
-typedef enum
-{
-    M4AIR_kCreated,        /**< State after M4AIR_create has been called */
-    M4AIR_kConfigured      /**< State after M4AIR_configure has been called */
-}M4AIR_States;
-
-
-/**
- ******************************************************************************
- * struct         M4AIR_InternalContext
- * @brief         The following structure is the internal context of the AIR.
- ******************************************************************************
- */
-typedef struct
-{
-    M4AIR_States            m_state;        /**< Internal state */
-    M4AIR_InputFormatType   m_inputFormat;  /**< Input format like YUV420Planar,
-                                                 RGB565, JPG, etc ... */
-    M4AIR_Params            m_params;       /**< Current input Parameter of  the processing */
-    M4OSA_UInt32            u32_x_inc[4];   /**< ratio between input and ouput width for YUV */
-    M4OSA_UInt32            u32_y_inc[4];   /**< ratio between input and ouput height for YUV */
-    M4OSA_UInt32            u32_x_accum_start[4];    /**< horizontal initial accumulator value */
-    M4OSA_UInt32            u32_y_accum_start[4];    /**< Vertical initial accumulator value */
-    M4OSA_UInt32            u32_x_accum[4]; /**< save of horizontal accumulator value */
-    M4OSA_UInt32            u32_y_accum[4]; /**< save of vertical accumulator value */
-    M4OSA_UInt8*            pu8_data_in[4]; /**< Save of input plane pointers
-                                                             in case of stripe mode */
-    M4OSA_UInt32            m_procRows;     /**< Number of processed rows,
-                                                     used in stripe mode only */
-    M4OSA_Bool                m_bOnlyCopy;  /**< Flag to know if we just perform a copy
-                                                        or a bilinear interpolation */
-    M4OSA_Bool                m_bFlipX;     /**< Depend on output orientation, used during
-                                                processing to revert processing order in X
-                                                coordinates */
-    M4OSA_Bool                m_bFlipY;     /**< Depend on output orientation, used during
-                                                processing to revert processing order in Y
-                                                coordinates */
-    M4OSA_Bool                m_bRevertXY;  /**< Depend on output orientation, used during
-                                                processing to revert X and Y processing order
-                                                 (+-90° rotation) */
-}M4AIR_InternalContext;
-
-/********************************* MACROS *******************************/
-#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer)\
-     if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
-
-
-/********************** M4AIR PUBLIC API IMPLEMENTATION ********************/
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
- * @brief    This function initialize an instance of the AIR.
- * @param    pContext:      (IN/OUT) Address of the context to create
- * @param    inputFormat:   (IN) input format type.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
- * @return    M4ERR_ALLOC: No more memory is available
- ******************************************************************************
- */
-M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
-{
-    M4OSA_ERR err = M4NO_ERROR ;
-    M4AIR_InternalContext* pC = M4OSA_NULL ;
-
-    /* Check that the address on the context is not NULL */
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    *pContext = M4OSA_NULL ;
-
-    /* Internal Context creation */
-    pC = (M4AIR_InternalContext*)M4OSA_32bitAlignedMalloc(sizeof(M4AIR_InternalContext),
-         M4AIR,(M4OSA_Char *)"AIR internal context") ;
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, pC) ;
-
-
-    /* Check if the input format is supported */
-    switch(inputFormat)
-    {
-#ifdef M4AIR_YUV420_FORMAT_SUPPORTED
-        case M4AIR_kYUV420P:
-        break ;
-#endif
-#ifdef M4AIR_YUV420A_FORMAT_SUPPORTED
-        case M4AIR_kYUV420AP:
-        break ;
-#endif
-        default:
-            err = M4ERR_AIR_FORMAT_NOT_SUPPORTED;
-            goto M4AIR_create_cleanup ;
-    }
-
-    /**< Save input format and update state */
-    pC->m_inputFormat = inputFormat;
-    pC->m_state = M4AIR_kCreated;
-
-    /* Return the context to the caller */
-    *pContext = pC ;
-
-    return M4NO_ERROR ;
-
-M4AIR_create_cleanup:
-    /* Error management : we destroy the context if needed */
-    if(M4OSA_NULL != pC)
-    {
-        free(pC) ;
-    }
-
-    *pContext = M4OSA_NULL ;
-
-    return err ;
-}
-
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
- * @brief    This function destroys an instance of the AIR component
- * @param    pContext:    (IN) Context identifying the instance to destroy
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- * @return    M4ERR_STATE: Internal state is incompatible with this function call.
- ******************************************************************************
- */
-M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
-{
-    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    /**< Check state */
-    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
-    {
-        return M4ERR_STATE;
-    }
-    free(pC) ;
-
-    return M4NO_ERROR ;
-
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
- * @brief   This function will configure the AIR.
- * @note    It will set the input and output coordinates and sizes,
- *          and indicates if we will proceed in stripe or not.
- *          In case a M4AIR_get in stripe mode was on going, it will cancel this previous
- *          processing and reset the get process.
- * @param    pContext:                (IN) Context identifying the instance
- * @param    pParams->m_bOutputStripe:(IN) Stripe mode.
- * @param    pParams->m_inputCoord:    (IN) X,Y coordinates of the first valid pixel in input.
- * @param    pParams->m_inputSize:    (IN) input ROI size.
- * @param    pParams->m_outputSize:    (IN) output size.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_ALLOC: No more memory space to add a new effect.
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- * @return    M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
- ******************************************************************************
- */
-M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
-{
-    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
-    M4OSA_UInt32    i,u32_width_in, u32_width_out, u32_height_in, u32_height_out;
-    M4OSA_UInt32    nb_planes;
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    if(M4AIR_kYUV420AP == pC->m_inputFormat)
-    {
-        nb_planes = 4;
-    }
-    else
-    {
-        nb_planes = 3;
-    }
-
-    /**< Check state */
-    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
-    {
-        return M4ERR_STATE;
-    }
-
-    /** Save parameters */
-    pC->m_params = *pParams;
-
-    /* Check for the input&output width and height are even */
-        if( ((pC->m_params.m_inputSize.m_height)&0x1)    ||
-            ((pC->m_params.m_inputSize.m_height)&0x1))
-        {
-            return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
-        }
-
-     if( ((pC->m_params.m_inputSize.m_width)&0x1)    ||
-            ((pC->m_params.m_inputSize.m_width)&0x1))
-        {
-            return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
-        }
-    if(((pC->m_params.m_inputSize.m_width) == (pC->m_params.m_outputSize.m_width))
-        &&((pC->m_params.m_inputSize.m_height) == (pC->m_params.m_outputSize.m_height)))
-    {
-        /**< No resize in this case, we will just copy input in output */
-        pC->m_bOnlyCopy = M4OSA_TRUE;
-    }
-    else
-    {
-        pC->m_bOnlyCopy = M4OSA_FALSE;
-
-        /**< Initialize internal variables used for resize filter */
-        for(i=0;i<nb_planes;i++)
-        {
-
-            u32_width_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_width:\
-                (pC->m_params.m_inputSize.m_width+1)>>1;
-            u32_height_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_height:\
-                (pC->m_params.m_inputSize.m_height+1)>>1;
-            u32_width_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_width:\
-                (pC->m_params.m_outputSize.m_width+1)>>1;
-            u32_height_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_height:\
-                (pC->m_params.m_outputSize.m_height+1)>>1;
-
-                /* Compute horizontal ratio between src and destination width.*/
-                if (u32_width_out >= u32_width_in)
-                {
-                    pC->u32_x_inc[i]   = ((u32_width_in-1) * 0x10000) / (u32_width_out-1);
-                }
-                else
-                {
-                    pC->u32_x_inc[i]   = (u32_width_in * 0x10000) / (u32_width_out);
-                }
-
-                /* Compute vertical ratio between src and destination height.*/
-                if (u32_height_out >= u32_height_in)
-                {
-                    pC->u32_y_inc[i]   = ((u32_height_in - 1) * 0x10000) / (u32_height_out-1);
-                }
-                else
-                {
-                    pC->u32_y_inc[i] = (u32_height_in * 0x10000) / (u32_height_out);
-                }
-
-                /*
-                Calculate initial accumulator value : u32_y_accum_start.
-                u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-                */
-                if (pC->u32_y_inc[i] >= 0x10000)
-                {
-                    /*
-                        Keep the fractionnal part, assimung that integer  part is coded
-                        on the 16 high bits and the fractionnal on the 15 low bits
-                    */
-                    pC->u32_y_accum_start[i] = pC->u32_y_inc[i] & 0xffff;
-
-                    if (!pC->u32_y_accum_start[i])
-                    {
-                        pC->u32_y_accum_start[i] = 0x10000;
-                    }
-
-                    pC->u32_y_accum_start[i] >>= 1;
-                }
-                else
-                {
-                    pC->u32_y_accum_start[i] = 0;
-                }
-                /**< Take into account that Y coordinate can be odd
-                    in this case we have to put a 0.5 offset
-                    for U and V plane as there a 2 times sub-sampled vs Y*/
-                if((pC->m_params.m_inputCoord.m_y&0x1)&&((i==1)||(i==2)))
-                {
-                    pC->u32_y_accum_start[i] += 0x8000;
-                }
-
-                /*
-                    Calculate initial accumulator value : u32_x_accum_start.
-                    u32_x_accum_start is coded on 15 bits, and represents a value between
-                    0 and 0.5
-                */
-
-                if (pC->u32_x_inc[i] >= 0x10000)
-                {
-                    pC->u32_x_accum_start[i] = pC->u32_x_inc[i] & 0xffff;
-
-                    if (!pC->u32_x_accum_start[i])
-                    {
-                        pC->u32_x_accum_start[i] = 0x10000;
-                    }
-
-                    pC->u32_x_accum_start[i] >>= 1;
-                }
-                else
-                {
-                    pC->u32_x_accum_start[i] = 0;
-                }
-                /**< Take into account that X coordinate can be odd
-                    in this case we have to put a 0.5 offset
-                    for U and V plane as there a 2 times sub-sampled vs Y*/
-                if((pC->m_params.m_inputCoord.m_x&0x1)&&((i==1)||(i==2)))
-                {
-                    pC->u32_x_accum_start[i] += 0x8000;
-                }
-        }
-    }
-
-    /**< Reset variable used for stripe mode */
-    pC->m_procRows = 0;
-
-    /**< Initialize var for X/Y processing order according to orientation */
-    pC->m_bFlipX = M4OSA_FALSE;
-    pC->m_bFlipY = M4OSA_FALSE;
-    pC->m_bRevertXY = M4OSA_FALSE;
-    switch(pParams->m_outputOrientation)
-    {
-        case M4COMMON_kOrientationTopLeft:
-            break;
-        case M4COMMON_kOrientationTopRight:
-            pC->m_bFlipX = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationBottomRight:
-            pC->m_bFlipX = M4OSA_TRUE;
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationBottomLeft:
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationLeftTop:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationRightTop:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationRightBottom:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            pC->m_bFlipX = M4OSA_TRUE;
-            pC->m_bFlipY = M4OSA_TRUE;
-            break;
-        case M4COMMON_kOrientationLeftBottom:
-            pC->m_bRevertXY = M4OSA_TRUE;
-            pC->m_bFlipX = M4OSA_TRUE;
-            break;
-        default:
-        return M4ERR_PARAMETER;
-    }
-    /**< Update state */
-    pC->m_state = M4AIR_kConfigured;
-
-    return M4NO_ERROR ;
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
- * @brief   This function will provide the requested resized area of interest according to
- *          settings  provided in M4AIR_configure.
- * @note    In case the input format type is JPEG, input plane(s)
- *          in pIn is not used. In normal mode, dimension specified in output plane(s) structure
- *          must be the same than the one specified in M4AIR_configure. In stripe mode, only the
- *          width will be the same, height will be taken as the stripe height (typically 16).
- *          In normal mode, this function is call once to get the full output picture.
- *          In stripe mode, it is called for each stripe till the whole picture has been
- *          retrieved,and  the position of the output stripe in the output picture
- *          is internally incremented at each step.
- *          Any call to M4AIR_configure during stripe process will reset this one to the
- *          beginning of the output picture.
- * @param    pContext:    (IN) Context identifying the instance
- * @param    pIn:            (IN) Plane structure containing input Plane(s).
- * @param    pOut:        (IN/OUT)  Plane structure containing output Plane(s).
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_ALLOC: No more memory space to add a new effect.
- * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
- ******************************************************************************
- */
-M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
-{
-    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
-    M4OSA_UInt32 i,j,k,u32_x_frac,u32_y_frac,u32_x_accum,u32_y_accum,u32_shift;
-        M4OSA_UInt8    *pu8_data_in, *pu8_data_in_org, *pu8_data_in_tmp, *pu8_data_out;
-        M4OSA_UInt8    *pu8_src_top;
-        M4OSA_UInt8    *pu8_src_bottom;
-    M4OSA_UInt32    u32_temp_value;
-    M4OSA_Int32    i32_tmp_offset;
-    M4OSA_UInt32    nb_planes;
-
-
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
-
-    /**< Check state */
-    if(M4AIR_kConfigured != pC->m_state)
-    {
-        return M4ERR_STATE;
-    }
-
-    if(M4AIR_kYUV420AP == pC->m_inputFormat)
-    {
-        nb_planes = 4;
-    }
-    else
-    {
-        nb_planes = 3;
-    }
-
-    /**< Loop on each Plane */
-    for(i=0;i<nb_planes;i++)
-    {
-
-         /* Set the working pointers at the beginning of the input/output data field */
-
-        u32_shift = ((i==0)||(i==3))?0:1; /**< Depend on Luma or Chroma */
-
-        if((M4OSA_FALSE == pC->m_params.m_bOutputStripe)\
-            ||((M4OSA_TRUE == pC->m_params.m_bOutputStripe)&&(0 == pC->m_procRows)))
-        {
-            /**< For input, take care about ROI */
-            pu8_data_in     = pIn[i].pac_data + pIn[i].u_topleft \
-                + (pC->m_params.m_inputCoord.m_x>>u32_shift)
-                        + (pC->m_params.m_inputCoord.m_y >> u32_shift) * pIn[i].u_stride;
-
-            /** Go at end of line/column in case X/Y scanning is flipped */
-            if(M4OSA_TRUE == pC->m_bFlipX)
-            {
-                pu8_data_in += ((pC->m_params.m_inputSize.m_width)>>u32_shift) -1 ;
-            }
-            if(M4OSA_TRUE == pC->m_bFlipY)
-            {
-                pu8_data_in += ((pC->m_params.m_inputSize.m_height>>u32_shift) -1)\
-                     * pIn[i].u_stride;
-            }
-
-            /**< Initialize accumulators in case we are using it (bilinear interpolation) */
-            if( M4OSA_FALSE == pC->m_bOnlyCopy)
-            {
-                pC->u32_x_accum[i] = pC->u32_x_accum_start[i];
-                pC->u32_y_accum[i] = pC->u32_y_accum_start[i];
-            }
-
-        }
-        else
-        {
-            /**< In case of stripe mode for other than first stripe, we need to recover input
-                 pointer from internal context */
-            pu8_data_in = pC->pu8_data_in[i];
-        }
-
-        /**< In every mode, output data are at the beginning of the output plane */
-        pu8_data_out    = pOut[i].pac_data + pOut[i].u_topleft;
-
-        /**< Initialize input offset applied after each pixel */
-        if(M4OSA_FALSE == pC->m_bFlipY)
-        {
-            i32_tmp_offset = pIn[i].u_stride;
-        }
-        else
-        {
-            i32_tmp_offset = -pIn[i].u_stride;
-        }
-
-        /**< In this case, no bilinear interpolation is needed as input and output dimensions
-            are the same */
-        if( M4OSA_TRUE == pC->m_bOnlyCopy)
-        {
-            /**< No +-90° rotation */
-            if(M4OSA_FALSE == pC->m_bRevertXY)
-            {
-                /**< No flip on X abscissa */
-                if(M4OSA_FALSE == pC->m_bFlipX)
-                {
-                    /**< Loop on each row */
-                    for(j=0;j<pOut[i].u_height;j++)
-                    {
-                        /**< Copy one whole line */
-                        memcpy((void *)pu8_data_out, (void *)pu8_data_in,
-                             pOut[i].u_width);
-
-                        /**< Update pointers */
-                        pu8_data_out += pOut[i].u_stride;
-                        if(M4OSA_FALSE == pC->m_bFlipY)
-                        {
-                            pu8_data_in += pIn[i].u_stride;
-                        }
-                        else
-                        {
-                            pu8_data_in -= pIn[i].u_stride;
-                        }
-                    }
-                }
-                else
-                {
-                    /**< Loop on each row */
-                    for(j=0;j<pOut[i].u_height;j++)
-                    {
-                        /**< Loop on each pixel of 1 row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-                            *pu8_data_out++ = *pu8_data_in--;
-                        }
-
-                        /**< Update pointers */
-                        pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
-
-                        pu8_data_in += pOut[i].u_width + i32_tmp_offset;
-
-                    }
-                }
-            }
-            /**< Here we have a +-90° rotation */
-            else
-            {
-
-                /**< Loop on each row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    pu8_data_in_tmp = pu8_data_in;
-
-                    /**< Loop on each pixel of 1 row */
-                    for(k=0;k<pOut[i].u_width;k++)
-                    {
-                        *pu8_data_out++ = *pu8_data_in_tmp;
-
-                        /**< Update input pointer in order to go to next/past line */
-                        pu8_data_in_tmp += i32_tmp_offset;
-                    }
-
-                    /**< Update pointers */
-                    pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
-                    if(M4OSA_FALSE == pC->m_bFlipX)
-                    {
-                        pu8_data_in ++;
-                    }
-                    else
-                    {
-                        pu8_data_in --;
-                    }
-                }
-            }
-        }
-        /**< Bilinear interpolation */
-        else
-        {
-
-        if(3 != i)    /**< other than alpha plane */
-        {
-            /**No +-90° rotation */
-            if(M4OSA_FALSE == pC->m_bRevertXY)
-            {
-
-                /**< Loop on each row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* Vertical weight factor */
-                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
-
-                    /* Reinit horizontal weight factor */
-                    u32_x_accum = pC->u32_x_accum_start[i];
-
-
-
-                        if(M4OSA_TRUE ==  pC->m_bFlipX)
-                        {
-
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
-                                                                        weight factor */
-
-                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                   pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                   (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                   pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-                        }
-
-                        else
-                        {
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
-                                                                        weight factor */
-
-                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                   pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                   (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                   pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                                    *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-
-                        }
-
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update vertical accumulator */
-                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
-                      if (pC->u32_y_accum[i]>>16)
-                    {
-                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
-                          pC->u32_y_accum[i] &= 0xffff;
-                       }
-                }
-        }
-            /** +-90° rotation */
-            else
-            {
-                pu8_data_in_org = pu8_data_in;
-
-                /**< Loop on each output row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* horizontal weight factor */
-                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
-
-                    /* Reinit accumulator */
-                    u32_y_accum = pC->u32_y_accum_start[i];
-
-                    if(M4OSA_TRUE ==  pC->m_bFlipX)
-                    {
-
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-
-                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                              if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                  u32_y_accum &= 0xffff;
-                               }
-
-                        }
-                    }
-                    else
-                    {
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                              if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                  u32_y_accum &= 0xffff;
-                               }
-                        }
-                    }
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update horizontal accumulator */
-                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
-
-                    pu8_data_in = pu8_data_in_org;
-                }
-
-            }
-            }/** 3 != i */
-            else
-            {
-            /**No +-90° rotation */
-            if(M4OSA_FALSE == pC->m_bRevertXY)
-            {
-
-                /**< Loop on each row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* Vertical weight factor */
-                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
-
-                    /* Reinit horizontal weight factor */
-                    u32_x_accum = pC->u32_x_accum_start[i];
-
-
-
-                        if(M4OSA_TRUE ==  pC->m_bFlipX)
-                        {
-
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
-                                                                         weight factor */
-
-                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                   pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                  (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                   pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                                u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-                        }
-
-                        else
-                        {
-                            /**< Loop on each output pixel in a row */
-                            for(k=0;k<pOut[i].u_width;k++)
-                            {
-                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
-                                                                        weight factor */
-
-                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
-
-                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                                /* Weighted combination */
-                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                   pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                   (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                   pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                                u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                                /* Update horizontal accumulator */
-                                u32_x_accum += pC->u32_x_inc[i];
-                            }
-
-                        }
-
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update vertical accumulator */
-                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
-                      if (pC->u32_y_accum[i]>>16)
-                    {
-                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
-                          pC->u32_y_accum[i] &= 0xffff;
-                       }
-                }
-
-            } /**< M4OSA_FALSE == pC->m_bRevertXY */
-            /** +-90° rotation */
-            else
-            {
-                pu8_data_in_org = pu8_data_in;
-
-                /**< Loop on each output row */
-                for(j=0;j<pOut[i].u_height;j++)
-                {
-                    /* horizontal weight factor */
-                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
-
-                    /* Reinit accumulator */
-                    u32_y_accum = pC->u32_y_accum_start[i];
-
-                    if(M4OSA_TRUE ==  pC->m_bFlipX)
-                    {
-
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-
-                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
-                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
-                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
-                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
-
-                            u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                              if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                  u32_y_accum &= 0xffff;
-                               }
-
-                        }
-                    }
-                    else
-                    {
-                        /**< Loop on each output pixel in a row */
-                        for(k=0;k<pOut[i].u_width;k++)
-                        {
-
-                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
-
-                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
-
-                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
-
-                            /* Weighted combination */
-                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
-                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
-                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
-                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
-
-                            u32_temp_value= (u32_temp_value >> 7)*0xff;
-
-                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
-
-                            /* Update vertical accumulator */
-                            u32_y_accum += pC->u32_y_inc[i];
-                              if (u32_y_accum>>16)
-                            {
-                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
-                                  u32_y_accum &= 0xffff;
-                               }
-                        }
-                    }
-                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
-
-                    /* Update horizontal accumulator */
-                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
-
-                    pu8_data_in = pu8_data_in_org;
-
-                }
-                } /**< M4OSA_TRUE == pC->m_bRevertXY */
-        }/** 3 == i */
-            }
-        /**< In case of stripe mode, save current input pointer */
-        if(M4OSA_TRUE == pC->m_params.m_bOutputStripe)
-        {
-            pC->pu8_data_in[i] = pu8_data_in;
-        }
-    }
-
-    /**< Update number of processed rows, reset it if we have finished
-         with the whole processing */
-    pC->m_procRows += pOut[0].u_height;
-    if(M4OSA_FALSE == pC->m_bRevertXY)
-    {
-        if(pC->m_params.m_outputSize.m_height <= pC->m_procRows)    pC->m_procRows = 0;
-    }
-    else
-    {
-        if(pC->m_params.m_outputSize.m_width <= pC->m_procRows)    pC->m_procRows = 0;
-    }
-
-    return M4NO_ERROR ;
-
-}
-
-
-
diff --git a/libvideoeditor/vss/src/M4AMRR_CoreReader.c b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
deleted file mode 100755
index 630f9dc..0000000
--- a/libvideoeditor/vss/src/M4AMRR_CoreReader.c
+++ /dev/null
@@ -1,909 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file        M4AMRR_CoreReader.c
- * @brief       Implementation of AMR parser
- * @note        This file contains the API Implementation for
- *              AMR Parser.
- ******************************************************************************
-*/
-#include "M4AMRR_CoreReader.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_CoreID.h"
-
-/**
- ******************************************************************************
- * Maximum bitrate per amr type
- ******************************************************************************
-*/
-#define M4AMRR_NB_MAX_BIT_RATE    12200
-#define M4AMRR_WB_MAX_BIT_RATE    23850
-
-/**
- ******************************************************************************
- * AMR reader context ID
- ******************************************************************************
-*/
-#define M4AMRR_CONTEXTID    0x414d5252
-
-/**
- ******************************************************************************
- * An AMR frame is 20ms
- ******************************************************************************
-*/
-#define M4AMRR_FRAME_LENGTH     20
-
-/**
- ******************************************************************************
- * For the seek, the file is splitted in 40 segments for faster search
- ******************************************************************************
-*/
-#define    M4AMRR_NUM_SEEK_ENTRIES 40
-
-#define M4AMRR_NB_SAMPLE_FREQUENCY 8000        /**< Narrow band sampling rate */
-#define M4AMRR_WB_SAMPLE_FREQUENCY 16000    /**< Wide band sampling rate */
-
-/**
- ******************************************************************************
- * AMR reader version numbers
- ******************************************************************************
-*/
-/* CHANGE_VERSION_HERE */
-#define M4AMRR_VERSION_MAJOR 1
-#define M4AMRR_VERSION_MINOR 11
-#define M4AMRR_VERSION_REVISION 3
-
-/**
- ******************************************************************************
- * structure    M4_AMRR_Context
- * @brief        Internal AMR reader context structure
- ******************************************************************************
-*/
-typedef struct
-{
-    M4OSA_UInt32             m_contextId ;      /* Fixed Id. to check for valid Context*/
-    M4OSA_FileReadPointer*   m_pOsaFilePtrFct;  /* File function pointer */
-    M4SYS_StreamDescription* m_pStreamHandler;  /* Stream Description */
-    M4OSA_UInt32*            m_pSeekIndex;      /* Seek Index Table */
-    M4OSA_UInt32             m_seekInterval;    /* Stores the seek Interval stored in the Index */
-    M4OSA_UInt32             m_maxAuSize;       /* Stores the max Au Size */
-    M4OSA_MemAddr32          m_pdataAddress;    /* Pointer to store AU data */
-    M4SYS_StreamType         m_streamType;      /* Stores the stream type AMR NB or WB */
-    M4OSA_Context            m_pAMRFile;        /* Data storage */
-    M4AMRR_State             m_status;          /* AMR Reader Status */
-    M4OSA_Int32              m_structSize;      /* size of structure*/
-} M4_AMRR_Context;
-
-/**
- ******************************************************************************
- * Parser internal functions, not usable from outside the reader context
- ******************************************************************************
-*/
-M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType);
-M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType);
-
-/**
- ******************************************************************************
- * M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
- * @brief    Internal function to the AMR Parser, returns the AU size of the Frame
- * @note     This function takes the stream type and the frametype and returns the
- *           frame lenght
- * @param    frameType(IN)    : AMR frame type
- * @param    streamType(IN)    : AMR stream type NB or WB
- * @returns  The frame size based on the frame type.
- ******************************************************************************
- */
-M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
-{
-    const M4OSA_UInt32    M4AMRR_NB_AUSIZE[]={13,14,16,18,20,21,27,32,6,6,6};
-    const M4OSA_UInt32    M4AMRR_WB_AUSIZE[]={18,24,33,37,41,47,51,59,61,6};
-
-    if ( streamType == M4SYS_kAMR )
-    {
-            return M4AMRR_NB_AUSIZE[frameType];
-    }
-    else /* M4SYS_kAMR_WB */
-    {
-            return M4AMRR_WB_AUSIZE[frameType];
-    }
-}
-
-/**
- ******************************************************************************
- * M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
- * @brief    Internal function to the AMR Parser, returns the Bit rate of the Frame
- * @note     This function takes the stream type and the frametype and returns the
- *           bit rate for the given frame.
- * @param    frameType(IN)    : AMR frame type
- * @param    streamType(IN)    : AMR stream type NB or WB
- * @returns  The frame's bit rate based on the frame type.
- ******************************************************************************
- */
-M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
-{
-    const M4OSA_UInt32    M4AMRR_NB_BITRATE[]=
-        {4750,5150,5900,6700,7400,7950,10200,12200,12200,12200,12200};
-    const M4OSA_UInt32    M4AMRR_WB_BITRATE[]=
-        {6600,8850,12650,14250,15850,18250,19850,23050,23850,12200};
-
-    if ( streamType == M4SYS_kAMR )
-    {
-            return M4AMRR_NB_BITRATE[frameType];
-    }
-    else /* M4SYS_kAMR_WB */
-    {
-            return M4AMRR_WB_BITRATE[frameType];
-    }
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_openRead(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
-                        M4OSA_FileReadPointer* pFileFunction)
-/*********************************************************/
-{
-    M4_AMRR_Context*    pStreamContext;
-    M4OSA_FilePosition  filePos;
-
-    M4OSA_ERR err = M4ERR_FILE_NOT_FOUND ;
-    M4OSA_UInt32 size ;
-    M4OSA_UInt32 data ;
-    M4OSA_Char *M4_Token;
-    M4OSA_UInt32 *tokenPtr;
-
-    /* Header for AMR NB */
-    M4OSA_UInt32 M4_AMR_1       = 0x4d412123;
-    M4OSA_UInt32 M4_AMR_NB_2    = 0x00000a52;
-
-    /* Header for AMR WB */
-    M4OSA_UInt32 M4_AMR_WB_2    = 0x42572d52;
-    M4OSA_UInt32 M4_AMR_WB_3    = 0x0000000a;
-    *pContext = M4OSA_NULL ;
-
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileDescriptor),M4ERR_PARAMETER,"File Desc. M4OSA_NULL");
-
-    M4_Token = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_MemAddr32)*3, M4AMR_READER,
-                 (M4OSA_Char *)("M4_Token"));
-    if(M4OSA_NULL == M4_Token)
-    {
-        M4OSA_DEBUG_IF3((M4OSA_NULL == M4_Token),M4ERR_ALLOC,"Mem Alloc failed - M4_Token");
-        return M4ERR_ALLOC ;
-    }
-
-    pStreamContext= (M4_AMRR_Context*)M4OSA_32bitAlignedMalloc(sizeof(M4_AMRR_Context), M4AMR_READER,
-                     (M4OSA_Char *)("pStreamContext"));
-    if(M4OSA_NULL == pStreamContext)
-    {
-        free(M4_Token);
-        *pContext = M4OSA_NULL ;
-        return M4ERR_ALLOC ;
-    }
-
-    /* Initialize the context */
-    pStreamContext->m_contextId = M4AMRR_CONTEXTID;
-    pStreamContext->m_structSize=sizeof(M4_AMRR_Context);
-    pStreamContext->m_pOsaFilePtrFct=pFileFunction ;
-    pStreamContext->m_pStreamHandler = M4OSA_NULL ;
-    pStreamContext->m_pAMRFile = M4OSA_NULL ;
-    pStreamContext->m_status = M4AMRR_kOpening ;
-    pStreamContext->m_pSeekIndex = M4OSA_NULL ;
-    pStreamContext->m_seekInterval = 0;
-    pStreamContext->m_maxAuSize = 0 ;
-    pStreamContext->m_pdataAddress = M4OSA_NULL;
-    err=pStreamContext->m_pOsaFilePtrFct->openRead(&pStreamContext->m_pAMRFile,
-        (M4OSA_Char*)pFileDescriptor,M4OSA_kFileRead );
-    if ( err != M4NO_ERROR )
-    {
-        /* M4OSA_DEBUG_IF3((err != M4NO_ERROR),err,"File open failed"); */
-        free(pStreamContext);
-        free(M4_Token);
-        *pContext = M4OSA_NULL ;
-        return err ;
-    }
-
-    pStreamContext->m_status = M4AMRR_kOpening ;
-
-    size = 6;
-    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-                (M4OSA_MemAddr8)M4_Token, &size);
-    if(size != 6)
-    {
-        goto cleanup;
-    }
-
-    tokenPtr = (M4OSA_UInt32*)M4_Token ;
-    /* Check for the first 4 bytes of the header common to WB and NB*/
-    if (*tokenPtr != M4_AMR_1)
-    {
-        goto cleanup;
-    }
-
-    tokenPtr++;
-    data = *tokenPtr & 0x0000FFFF ;
-    /* Check if the next part is Narrow band header */
-    if (data!= M4_AMR_NB_2)
-    {
-        /* Stream is AMR Wide Band */
-        filePos = 4;
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekBeginning, &filePos);
-        size = 5;
-        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-             (M4OSA_MemAddr8)M4_Token, &size);
-        if(size != 5)
-            goto cleanup;
-        tokenPtr=(M4OSA_UInt32*)M4_Token;
-        /* Check for the Wide band hader */
-        if(*tokenPtr!= M4_AMR_WB_2)
-            goto cleanup;
-        tokenPtr++;
-        data = *tokenPtr & 0x000000FF ;
-        if(data!= M4_AMR_WB_3)
-            goto cleanup;
-        pStreamContext->m_streamType = M4SYS_kAMR_WB ;
-    }
-    else
-    {
-        /* Stream is a Narrow band stream */
-        pStreamContext->m_streamType = M4SYS_kAMR ;
-    }
-    /*  No Profile level defined */
-    pStreamContext->m_status = M4AMRR_kOpened;
-
-    free(M4_Token);
-    *pContext = pStreamContext ;
-    return M4NO_ERROR;
-
-cleanup:
-
-    if(M4OSA_NULL != pStreamContext->m_pAMRFile)
-    {
-        pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
-    }
-
-    free(M4_Token);
-    free(pStreamContext);
-
-    *pContext = M4OSA_NULL ;
-
-    return (M4OSA_ERR)M4ERR_AMR_NOT_COMPLIANT;
-}
-
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc )
-/*********************************************************/
-{
-    M4_AMRR_Context*    pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_Char            frameHeader, frameType ;
-    M4OSA_UInt32        size, auCount=0;
-    M4OSA_FilePosition  filePos;
-
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDesc),M4ERR_PARAMETER,"Stream Desc. M4OSA_NULL");
-    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
-         "Bad Context");
-    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
-
-    if (M4OSA_NULL != pStreamContext->m_pStreamHandler)
-    {
-        return M4WAR_NO_MORE_STREAM ;
-    }
-
-    size = 1;
-    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-         (M4OSA_MemAddr8)&frameHeader, &size);
-
-    /* XFFF FXXX -> F is the Frame type */
-    frameType = ( frameHeader & 0x78 ) >> 3 ;
-
-    if ( frameType == 15 )
-    {
-        return M4WAR_NO_DATA_YET ;
-    }
-
-    if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 11 ))
-    {
-        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
-    }
-
-    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
-    {
-        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
-    }
-
-    /* Average bit rate is assigned the bitrate of the first frame */
-    pStreamDesc->averageBitrate = M4AMRR_getBitrate(frameType,pStreamContext->m_streamType);
-
-    filePos = -1;
-    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekCurrent,
-         &filePos);
-
-    /* Initialize pStreamDesc */
-    pStreamDesc->profileLevel = 0xFF ;
-    pStreamDesc->decoderSpecificInfoSize = 0 ;
-    pStreamDesc->decoderSpecificInfo = M4OSA_NULL ;
-    pStreamDesc->maxBitrate = (pStreamContext->m_streamType ==
-        M4SYS_kAMR )?M4AMRR_NB_MAX_BIT_RATE:M4AMRR_WB_MAX_BIT_RATE;
-    pStreamDesc->profileLevel = 0xFF ;
-    pStreamDesc->streamID = 1;
-    pStreamDesc->streamType = pStreamContext->m_streamType;
-
-    /* Timescale equals Sampling Frequency: NB-8000 Hz, WB-16000 Hz */
-    pStreamDesc->timeScale = (pStreamContext->m_streamType == M4SYS_kAMR )?8000:16000;
-    pStreamDesc->duration = M4OSA_TIME_UNKNOWN;
-
-    pStreamContext->m_pStreamHandler =
-         (M4SYS_StreamDescription*)M4OSA_32bitAlignedMalloc(sizeof(M4SYS_StreamDescription),
-             M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pStreamHandler"));
-    if(M4OSA_NULL == pStreamContext->m_pStreamHandler)
-    {
-        return M4ERR_ALLOC;
-    }
-
-    /* Copy the Stream Desc. into the Context */
-    pStreamContext->m_pStreamHandler->averageBitrate = pStreamDesc->averageBitrate;
-    pStreamContext->m_pStreamHandler->decoderSpecificInfo = M4OSA_NULL ;
-    pStreamContext->m_pStreamHandler->decoderSpecificInfoSize = 0 ;
-    pStreamContext->m_pStreamHandler->duration = M4OSA_TIME_UNKNOWN;
-    pStreamContext->m_pStreamHandler->profileLevel = 0xFF ;
-    pStreamContext->m_pStreamHandler->streamID = 1;
-    pStreamContext->m_pStreamHandler->streamType = pStreamDesc->streamType ;
-    pStreamContext->m_pStreamHandler->timeScale = pStreamDesc->timeScale ;
-
-    /* Count the number of Access Unit in the File to get the */
-    /* duration of the stream = 20 ms * number of access unit */
-    while(1)
-    {
-        size = 1;
-        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-             (M4OSA_MemAddr8)&frameHeader, &size);
-        if ( size == 0)
-            break ;
-        frameType = (frameHeader & 0x78) >> 3 ;
-        /* Get the frame size and skip so many bytes */
-        if(frameType != 15){
-            /* GLA 20050628 when frametype is >10 we read over a table */
-            if(frameType > 10)
-                continue ;
-
-            size = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
-            if(size > pStreamContext->m_maxAuSize )
-            {
-                pStreamContext->m_maxAuSize = size ;
-            }
-            filePos = size-1;
-            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-                 M4OSA_kFileSeekCurrent, &filePos);
-            auCount++;
-        }
-    }
-
-    /* Each Frame is 20 m Sec. */
-    pStreamContext->m_pStreamHandler->duration = auCount * M4AMRR_FRAME_LENGTH ;
-    pStreamDesc->duration = pStreamContext->m_pStreamHandler->duration ;
-
-    /* Put the file pointer back at the first Access unit */
-    if( pStreamContext->m_streamType == M4SYS_kAMR )
-    {
-        filePos = 6;
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekBeginning, &filePos);
-    }
-    if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )
-    {
-        filePos = 9;
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekBeginning, &filePos);
-    }
-    return M4NO_ERROR ;
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs )
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_Int32 size = 0 ;
-
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamIDs),M4ERR_PARAMETER,"Stream Ids. M4OSA_NULL");
-    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
-         "Bad Context");
-    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
-
-    while( pStreamIDs[size] != 0 )
-    {
-        if( pStreamIDs[size++] != 1 )
-        {
-            return M4ERR_BAD_STREAM_ID ;
-        }
-    }
-
-    /* Allocate memory for data Address for use in NextAU() */
-    if(M4OSA_NULL == pStreamContext->m_pdataAddress)
-    {
-        size = pStreamContext->m_maxAuSize ;
-        /* dataAddress is owned by Parser, application should not delete or free it */
-        pStreamContext->m_pdataAddress =(M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc(size + (4 - size % 4),
-            M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pdataAddress"));
-        if(M4OSA_NULL == pStreamContext->m_pdataAddress)
-        {
-                M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pdataAddress),M4ERR_ALLOC,
-                    "Mem Alloc failed - dataAddress");
-                return M4ERR_ALLOC;
-        }
-    }
-
-    /* Set the state of context to Reading */
-    pStreamContext->m_status = M4AMRR_kReading ;
-
-    return M4NO_ERROR ;
-}
-
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_Char        frameHeader ;
-    M4OSA_Char        frameType ;
-    M4OSA_Int32        auSize;
-    M4OSA_UInt32    size ;
-    M4OSA_FilePosition  filePos;
-
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
-    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
-         "Bad Context");
-    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading), M4ERR_STATE, "Invalid State");
-
-    if ( StreamID != 1 )
-    {
-            return M4ERR_BAD_STREAM_ID;
-    }
-
-    /* Read the frame header byte */
-    size = pStreamContext->m_maxAuSize;
-    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-         (M4OSA_MemAddr8)pStreamContext->m_pdataAddress, &size);
-    if(size != pStreamContext->m_maxAuSize)
-    {
-        return M4WAR_NO_MORE_AU;
-    }
-
-    frameHeader = ((M4OSA_MemAddr8)pStreamContext->m_pdataAddress)[0];
-
-    frameType = ( frameHeader & 0x78 ) >> 3 ;
-
-    if (( pStreamContext->m_streamType == M4SYS_kAMR ) &&
-        ( frameType > 11 ) && ( frameType != 15 ))
-    {
-        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
-    }
-
-    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) &&
-        ( frameType > 9 ) && ( frameType != 15 ))
-    {
-        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
-    }
-
-    /* Get the frame size */
-    if(frameType == 15)
-    {
-        auSize = 1;
-    }
-    else
-    {
-        auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
-    }
-
-    size -= auSize ;
-    if(size != 0)
-    {
-        filePos = -((M4OSA_FilePosition)size);
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekCurrent, &filePos);
-    }
-
-    pAu->size = auSize ;
-
-    /* even when frameType == 15 (no data frame), ARM core decoder outputs full PCM buffer */
-    /*if(frameType == 15 )
-    {
-        pAu->CTS += 0;
-    }*/
-    /*else*/
-    {
-        pAu->CTS += M4AMRR_FRAME_LENGTH ;
-    }
-
-
-    pAu->DTS = pAu->CTS ;
-    pAu->attribute = M4SYS_kFragAttrOk;
-
-    pAu->stream = pStreamContext->m_pStreamHandler;
-    pAu->dataAddress = pStreamContext->m_pdataAddress ;
-
-    if(frameHeader & 0x80)
-    {
-        return M4WAR_NO_MORE_AU;
-    }
-
-    /* Change the state to implement NextAu->freeAu->NextAu FSM */
-    pStreamContext->m_status = M4AMRR_kReading_nextAU ;
-
-    return M4NO_ERROR ;
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
-    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
-         "Bad Context");
-    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading_nextAU), M4ERR_STATE,
-         "Invalid State");
-
-    if (( StreamID != 1 ) && ( StreamID != 0))
-    {
-            return M4ERR_BAD_STREAM_ID;
-    }
-
-    /* Change the state to Reading so as to allow access to next AU */
-    pStreamContext->m_status = M4AMRR_kReading ;
-
-    return M4NO_ERROR ;
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
-                         M4SYS_SeekAccessMode seekMode, M4OSA_Time* pObtainCTS)
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_UInt32 count, prevAU, nextAU ;
-    M4OSA_UInt32 size ;
-    M4OSA_UInt32 auSize ;
-    M4OSA_UInt32 position, partSeekTime;
-    M4OSA_UInt32 auCount = 0, skipAuCount = 0 ;
-    M4OSA_Char    frameHeader ;
-    M4OSA_Char    frameType ;
-    M4OSA_FilePosition  filePos;
-    M4OSA_Double time_double;
-
-    /*Make explicit time cast, but take care that timescale is not used !!!*/
-    M4OSA_TIME_TO_MS(time_double, time, 1000);
-
-    *pObtainCTS = 0;
-
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
-         "Bad Context");
-    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading) && \
-        ( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
-    M4OSA_DEBUG_IF1((time_double < 0),M4ERR_PARAMETER,"negative time");
-
-    /* Coming to seek for the first time, need to build the seekIndex Table */
-    if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
-    {
-        M4OSA_Double duration_double;
-
-        count = 0 ;
-        pStreamContext->m_pSeekIndex =
-             (M4OSA_UInt32*)M4OSA_32bitAlignedMalloc(M4AMRR_NUM_SEEK_ENTRIES * sizeof(M4OSA_UInt32),
-                 M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pSeekIndex"));
-
-        if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
-        {
-            M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pSeekIndex),M4ERR_ALLOC,
-                "Mem Alloc Failed - SeekIndex");
-            return M4ERR_ALLOC ;
-        }
-
-        /* point to the first AU */
-        if( pStreamContext->m_streamType == M4SYS_kAMR )
-        {
-            filePos = 6;
-        }
-        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
-        {
-            filePos = 9;
-        }
-
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekBeginning, &filePos);
-
-        /* Set the postion to begining of first AU */
-        position = (pStreamContext->m_streamType != M4SYS_kAMR)?9:6;
-
-        /*Make explicit time cast, but take care that timescale is not used !!!*/
-        M4OSA_TIME_TO_MS(duration_double, pStreamContext->m_pStreamHandler->duration, 1000);
-
-        /* Calculate the seek Interval duration based on total dutation */
-        /* Interval = (duration / ENTRIES) in multiples of AU frame length */
-        pStreamContext->m_seekInterval =
-             (M4OSA_UInt32)(duration_double / M4AMRR_NUM_SEEK_ENTRIES) ;
-        pStreamContext->m_seekInterval /= M4AMRR_FRAME_LENGTH ;
-        pStreamContext->m_seekInterval *= M4AMRR_FRAME_LENGTH ;
-        skipAuCount = pStreamContext->m_seekInterval / M4AMRR_FRAME_LENGTH ;
-
-        pStreamContext->m_pSeekIndex[count++]=position;
-        while(count < M4AMRR_NUM_SEEK_ENTRIES )
-        {
-            size = 1;
-            pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-                 (M4OSA_MemAddr8)&frameHeader, &size);
-            if ( size == 0)
-            {
-                break ;
-            }
-            frameType = (frameHeader & 0x78) >> 3 ;
-            if(frameType != 15)
-            {
-                /**< bugfix Ronan Cousyn 05/04/2006: In the core reader AMR, the
-                 * function M4AMRR_seek doesn't check the frameType */
-                if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 10 ))
-                {
-                    return M4ERR_AMR_INVALID_FRAME_TYPE;
-                }
-                if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
-                {
-                    return M4ERR_AMR_INVALID_FRAME_TYPE;
-                }
-                auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
-                position += auSize ;
-                filePos = auSize-1;
-                pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-                     M4OSA_kFileSeekCurrent, &filePos);
-                auCount++;
-            }
-            else
-            {
-                position ++;
-            }
-            /* Skip the number of AU's as per interval and store in the Index table */
-            if ( (skipAuCount != 0) && !(auCount % skipAuCount))
-            {
-                pStreamContext->m_pSeekIndex[count++] = position;
-            }
-        }
-    }/* End of Building the seek table */
-
-    /* Use the seek table to seek the required time in the stream */
-
-    /* If we are seeking the begining of the file point to first AU */
-    if ( seekMode == M4SYS_kBeginning )
-    {
-        if( pStreamContext->m_streamType == M4SYS_kAMR )
-        {
-            filePos = 6;
-        }
-        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
-        {
-            filePos = 9;
-        }
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekBeginning, &filePos );
-        return M4NO_ERROR ;
-    }
-
-    /* Get the Nearest Second */
-    if (0 != pStreamContext->m_seekInterval)
-    {
-        position = (M4OSA_UInt32)(time_double / pStreamContext->m_seekInterval);
-    }
-    else
-    {
-        /*avoid division by 0*/
-        position = 0;
-    }
-
-    /* We have only 40 seek Index. */
-    position=(position >= M4AMRR_NUM_SEEK_ENTRIES)?M4AMRR_NUM_SEEK_ENTRIES-1:position;
-
-    /* SeekIndex will point to nearest Au, we need to search for the
-    required time form that position */
-    partSeekTime = (M4OSA_UInt32)time_double - position * pStreamContext->m_seekInterval;
-
-    position = pStreamContext->m_pSeekIndex[position];
-
-    if(!position)
-    {
-        return M4WAR_INVALID_TIME ;
-    }
-
-    /* point the file pointer to nearest AU */
-    filePos = position;
-    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekBeginning,
-         &filePos );
-
-    if ( partSeekTime == 0)
-    {
-        *pObtainCTS = time;
-        return M4NO_ERROR;
-    }
-
-    *pObtainCTS = (M4OSA_Time)(time_double - (M4OSA_Double)partSeekTime);
-
-    switch(seekMode)
-    {
-        /* Get the AU before the target time */
-        case M4SYS_kPreviousRAP:
-        case M4SYS_kNoRAPprevious:
-            position = partSeekTime / M4AMRR_FRAME_LENGTH ;
-            if ( !(partSeekTime % M4AMRR_FRAME_LENGTH) )
-            {
-                position -- ;
-            }
-        break;
-        /* Get the Closest AU following the target time */
-        case M4SYS_kNextRAP:
-        case M4SYS_kNoRAPnext:
-            position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
-        break;
-        /*  Get the closest AU to target time */
-        case M4SYS_kClosestRAP:
-        case M4SYS_kNoRAPclosest:
-            prevAU = partSeekTime-(partSeekTime/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH;
-            nextAU =
-                 ((partSeekTime+M4AMRR_FRAME_LENGTH)/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH -\
-                     partSeekTime ;
-            if(prevAU < nextAU)
-            {
-                position = partSeekTime / M4AMRR_FRAME_LENGTH ;
-            }
-            else
-            {
-                position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
-            }
-        break;
-        case M4SYS_kBeginning:
-        break;
-    }
-
-    count = 0 ;
-    /* Skip the Access unit in the stream to skip the part seek time,
-       to reach the required target time */
-    while(count < position )
-    {
-        size = 1;
-        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
-             (M4OSA_MemAddr8)&frameHeader, &size);
-        if ( size == 0)
-        {
-            /* If the target time is invalid, point to begining and return */
-            *pObtainCTS = 0;
-            filePos = pStreamContext->m_pSeekIndex[0];
-            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-                 M4OSA_kFileSeekBeginning, &filePos);
-            return M4WAR_INVALID_TIME ;
-        }
-        *pObtainCTS += M4AMRR_FRAME_LENGTH; /*Should use M4OSA_INT64_ADD !!*/
-        count++;
-        frameType = (frameHeader & 0x78) >> 3 ;
-        if(frameType == 15)
-        {
-            auSize = 1 ;
-        }
-        else
-        {
-            auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
-        }
-
-        filePos = auSize-1;
-        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
-             M4OSA_kFileSeekCurrent, &filePos);
-    }
-
-    return M4NO_ERROR;
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context)
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-
-    /* Close the AMR stream */
-    pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
-
-    pStreamContext->m_status=M4AMRR_kClosed ;
-
-    /* Check if AU data Address is allocated memory and free it */
-    if(M4OSA_NULL != pStreamContext->m_pdataAddress)
-    {
-        free(pStreamContext->m_pdataAddress);
-    }
-
-    /* Check if the stream handler is allocated memory */
-    if(M4OSA_NULL != pStreamContext->m_pStreamHandler)
-    {
-        free(pStreamContext->m_pStreamHandler);
-    }
-
-    /* Seek table is created only when seek is used, so check if memory is allocated */
-    if(M4OSA_NULL != pStreamContext->m_pSeekIndex)
-    {
-        free(pStreamContext->m_pSeekIndex);
-    }
-
-    /* Free the context */
-    free(pStreamContext);
-
-    return M4NO_ERROR ;
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId)
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
-    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
-         "Bad Context");
-
-    if (( streamId != 1 ) && ( streamId != 0))
-    {
-            return M4ERR_BAD_STREAM_ID;
-    }
-
-    *pState = pStreamContext->m_status ;
-
-    return M4NO_ERROR ;
-}
-
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion)
-/*********************************************************/
-{
-    M4OSA_TRACE1_1("M4AMRR_getVersion called with pVersion: 0x%x\n", pVersion);
-    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
-         "pVersion is NULL in M4AMRR_getVersion");
-
-    pVersion->m_major = M4AMRR_VERSION_MAJOR;
-    pVersion->m_minor = M4AMRR_VERSION_MINOR;
-    pVersion->m_revision = M4AMRR_VERSION_REVISION;
-
-    return M4NO_ERROR;
-}
-
-/*********************************************************/
-M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
-/*********************************************************/
-{
-    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
-
-    /**
-     * Check input parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == Context),  M4ERR_PARAMETER,
-                "M4AMRR_getmaxAUsize: Context is M4OSA_NULL");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pMaxAuSize),M4ERR_PARAMETER,
-                "M4AMRR_getmaxAUsize: pMaxAuSize is M4OSA_NULL");
-
-    *pMaxAuSize = pStreamContext->m_maxAuSize;
-
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/src/M4ChannelConverter.c b/libvideoeditor/vss/src/M4ChannelConverter.c
deleted file mode 100755
index fca5550..0000000
--- a/libvideoeditor/vss/src/M4ChannelConverter.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4ChannelCoverter.c
- * @brief
- * @note
- ******************************************************************************
- */
-
-void MonoTo2I_16( const short *src,
-                        short *dst,
-                        short n)
-{
-    short ii;
-    src += n-1;
-    dst += (n*2)-1;
-
-    for (ii = n; ii != 0; ii--){
-        *dst-- = *src;
-        *dst-- = *src--;
-    }
-
-    return;
-}
-
-void From2iToMono_16( const short *src,
-                            short *dst,
-                            short n)
-{
-    short ii;
-    long Temp;
-    for (ii = n; ii != 0; ii--){
-        Temp = (long)*(src++);
-        Temp += (long)*(src++);
-        *(dst++) = (short)(Temp >>1);
-    }
-
-    return;
-}
-
diff --git a/libvideoeditor/vss/src/M4DECODER_Null.c b/libvideoeditor/vss/src/M4DECODER_Null.c
deleted file mode 100755
index ce260e5..0000000
--- a/libvideoeditor/vss/src/M4DECODER_Null.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*

- * Copyright (C) 2011 The Android Open Source Project

- *

- * Licensed under the Apache License, Version 2.0 (the "License");

- * you may not use this file except in compliance with the License.

- * You may obtain a copy of the License at

- *

- *      http://www.apache.org/licenses/LICENSE-2.0

- *

- * Unless required by applicable law or agreed to in writing, software

- * distributed under the License is distributed on an "AS IS" BASIS,

- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

- * See the License for the specific language governing permissions and

- * limitations under the License.

- */

-/**

-*************************************************************************

- * @file    M4DECODER_Null.c

- * @brief   Implementation of the Null decoder public interface

- * @note    This file implements a "null" video decoder, i.e. a decoder

- *          that does nothing

-*************************************************************************

-*/

-#include "NXPSW_CompilerSwitches.h"

-

-#include "M4OSA_Types.h"

-#include "M4OSA_Debug.h"

-#include "M4TOOL_VersionInfo.h"

-#include "M4DA_Types.h"

-#include "M4DECODER_Common.h"

-#include "M4DECODER_Null.h"

-

-/**

- ************************************************************************

- * NULL Video Decoder version information

- ************************************************************************

-*/

-/* CHANGE_VERSION_HERE */

-#define M4DECODER_NULL_MAJOR    1

-#define M4DECODER_NULL_MINOR    0

-#define M4DECODER_NULL_REVISION 0

-

-/**

- ************************************************************************

- * structure    M4_VideoHandler_Context

- * @brief       Defines the internal context of a video decoder instance

- * @note        The context is allocated and freed by the video decoder

- ************************************************************************

-*/

-typedef struct {

-    void*                    m_pLibrary;            // Core library identifier

-    M4OSA_Int32              m_DecoderId;           // Core decoder identifier

-    M4OSA_Int32              m_RendererId;          // Core renderer identifier

-    M4_VideoStreamHandler*   m_pVideoStreamhandler; // Video stream description

-    M4_AccessUnit*           m_pNextAccessUnitToDecode; // Access unit used to

-                                                        // read and decode one frame

-    void*                    m_pUserData;           // Pointer to any user data

-    M4READER_DataInterface*  m_pReader;             // Reader data interface

-    M4OSA_Bool               m_bDoRendering;        // Decides if render required

-    M4OSA_Int32              m_structSize;          // Size of the structure

-

-    M4DECODER_OutputFilter* m_pVideoFilter;         // Color conversion filter

-    M4VIFI_ImagePlane       *pDecYuvData;           // Pointer to Yuv data plane

-    M4VIFI_ImagePlane       *pDecYuvWithEffect;     // Pointer to Yuv plane with color effect

-    M4OSA_Bool               bYuvWithEffectSet;     // Original Yuv data OR Yuv with color effect

-

-} M4_VideoHandler_Context;

-

-/***********************************************************************/

-/************** M4DECODER_VideoInterface implementation ****************/

-/***********************************************************************/

-

-/**

- ************************************************************************

- * @brief   Creates an instance of the decoder

- * @note    Allocates the context

- *

- * @param   pContext:       (OUT)   Context of the decoder

- * @param   pStreamHandler: (IN)    Pointer to a video stream description

- * @param   pSrcInterface:  (IN)    Pointer to the M4READER_DataInterface

- *                                  structure that must be used by the

- *                                  decoder to read data from the stream

- * @param   pAccessUnit     (IN)    Pointer to an access unit

- *                                  (allocated by the caller) where decoded data

- *                                  are stored

- *

- * @return  M4NO_ERROR              There is no error

- * @return  M4ERR_STATE             State automaton is not applied

- * @return  M4ERR_ALLOC             A memory allocation has failed

- * @return  M4ERR_PARAMETER         At least one input parameter is not proper

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_create(M4OSA_Context *pContext,

-                                M4_StreamHandler *pStreamHandler,

-                                M4READER_GlobalInterface *pReaderGlobalInterface,
-                                M4READER_DataInterface *pReaderDataInterface,

-                                M4_AccessUnit* pAccessUnit,

-                                M4OSA_Void* pUserData) {

-

-    M4_VideoHandler_Context* pStreamContext = M4OSA_NULL;

-

-    *pContext = M4OSA_NULL;

-    pStreamContext = (M4_VideoHandler_Context*)M4OSA_32bitAlignedMalloc (

-                        sizeof(M4_VideoHandler_Context), M4DECODER_MPEG4,

-                        (M4OSA_Char *)"M4_VideoHandler_Context");

-    if (pStreamContext == 0) {

-        return M4ERR_ALLOC;

-    }

-

-    pStreamContext->m_structSize = sizeof(M4_VideoHandler_Context);

-    pStreamContext->m_pNextAccessUnitToDecode = M4OSA_NULL;

-    pStreamContext->m_pLibrary              = M4OSA_NULL;

-    pStreamContext->m_pVideoStreamhandler   = M4OSA_NULL;

-    pStreamContext->m_DecoderId             = -1;

-    pStreamContext->m_RendererId            = -1;

-

-    pStreamContext->m_pUserData = M4OSA_NULL;

-    pStreamContext->m_bDoRendering = M4OSA_TRUE;

-    pStreamContext->m_pVideoFilter = M4OSA_NULL;

-    pStreamContext->bYuvWithEffectSet = M4OSA_FALSE;

-

-    *pContext=pStreamContext;

-    return M4NO_ERROR;

-}

-

-/**

- ************************************************************************

- * @brief   Destroy the instance of the decoder

- * @note    After this call the context is invalid

- *

- * @param   context:    (IN)    Context of the decoder

- *

- * @return  M4NO_ERROR          There is no error

- * @return  M4ERR_PARAMETER     The context is invalid

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_destroy(M4OSA_Context pContext) {

-

-    M4_VideoHandler_Context* pStreamContext = (M4_VideoHandler_Context*)pContext;

-

-    M4OSA_DEBUG_IF1((M4OSA_NULL == pStreamContext),

-        M4ERR_PARAMETER, "M4DECODER_NULL_destroy: invalid context pointer");

-

-    free(pStreamContext);

-

-    return M4NO_ERROR;

-}

-

-/**

- ************************************************************************

- * @brief   Get an option value from the decoder

- * @note    This function allows the caller to retrieve a property value:

- *

- * @param   context:    (IN)        Context of the decoder

- * @param   optionId:   (IN)        Indicates the option to get

- * @param   pValue:     (IN/OUT)    Pointer to structure or value where

- *                                  option is stored

- *

- * @return  M4NO_ERROR              There is no error

- * @return  M4ERR_PARAMETER         The context is invalid (in DEBUG only)

- * @return  M4ERR_BAD_OPTION_ID     When the option ID is not a valid one

- * @return  M4ERR_STATE             State automaton is not applied

- * @return  M4ERR_NOT_IMPLEMENTED   Function not implemented

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_getOption(M4OSA_Context context,

-                                   M4OSA_OptionID optionId,

-                                   M4OSA_DataOption  pValue) {

-

-    return M4ERR_NOT_IMPLEMENTED;

-}

-

-/**

- ************************************************************************

- * @brief   Set an option value of the decoder

- * @note    Allows the caller to set a property value:

- *

- * @param   context:    (IN)        Context of the decoder

- * @param   optionId:   (IN)        Identifier indicating the option to set

- * @param   pValue:     (IN)        Pointer to structure or value

- *                                  where option is stored

- *

- * @return  M4NO_ERROR              There is no error

- * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one

- * @return  M4ERR_STATE             State automaton is not applied

- * @return  M4ERR_PARAMETER         The option parameter is invalid

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_setOption(M4OSA_Context context,

-                                   M4OSA_OptionID optionId,

-                                   M4OSA_DataOption pValue) {

-

-    M4DECODER_OutputFilter *pFilterOption;

-

-    M4_VideoHandler_Context *pStreamContext =

-        (M4_VideoHandler_Context*)context;

-

-    M4OSA_ERR err = M4NO_ERROR;

-    M4OSA_UInt32 height = 0;

-    M4OSA_UInt8 *p_src,*p_des;

-    M4VIFI_ImagePlane* pTempDecYuvData = M4OSA_NULL;

-

-    switch (optionId) {

-        case M4DECODER_kOptionID_DecYuvData:

-            pStreamContext->pDecYuvData = (M4VIFI_ImagePlane *)pValue;

-            break;

-

-        case M4DECODER_kOptionID_YuvWithEffectContiguous:

-            pStreamContext->pDecYuvWithEffect = (M4VIFI_ImagePlane *)pValue;

-            break;

-

-        case M4DECODER_kOptionID_EnableYuvWithEffect:

-            pStreamContext->bYuvWithEffectSet = (M4OSA_Bool)(intptr_t)pValue;

-            break;

-

-        case M4DECODER_kOptionID_YuvWithEffectNonContiguous:

-            pTempDecYuvData =  (M4VIFI_ImagePlane *)pValue;

-

-            p_des = pStreamContext->pDecYuvWithEffect[0].pac_data +

-                 pStreamContext->pDecYuvWithEffect[0].u_topleft;

-            p_src = pTempDecYuvData[0].pac_data +

-                 pTempDecYuvData[0].u_topleft;

-

-            for (height = 0; height<pStreamContext->pDecYuvWithEffect[0].u_height;

-             height++) {

-                memcpy((void *)p_des, (void *)p_src,

-                 pStreamContext->pDecYuvWithEffect[0].u_width);

-

-                p_des += pStreamContext->pDecYuvWithEffect[0].u_stride;

-                p_src += pTempDecYuvData[0].u_stride;

-            }

-

-            p_des = pStreamContext->pDecYuvWithEffect[1].pac_data +

-             pStreamContext->pDecYuvWithEffect[1].u_topleft;

-            p_src = pTempDecYuvData[1].pac_data +

-             pTempDecYuvData[1].u_topleft;

-

-            for (height = 0; height<pStreamContext->pDecYuvWithEffect[1].u_height;

-             height++) {

-                memcpy((void *)p_des, (void *)p_src,

-                 pStreamContext->pDecYuvWithEffect[1].u_width);

-

-                p_des += pStreamContext->pDecYuvWithEffect[1].u_stride;

-                p_src += pTempDecYuvData[1].u_stride;

-            }

-

-            p_des = pStreamContext->pDecYuvWithEffect[2].pac_data +

-             pStreamContext->pDecYuvWithEffect[2].u_topleft;

-            p_src = pTempDecYuvData[2].pac_data +

-             pTempDecYuvData[2].u_topleft;

-

-            for (height = 0; height<pStreamContext->pDecYuvWithEffect[2].u_height;

-             height++) {

-                memcpy((void *)p_des, (void *)p_src,

-                 pStreamContext->pDecYuvWithEffect[2].u_width);

-

-                p_des += pStreamContext->pDecYuvWithEffect[2].u_stride;

-                p_src += pTempDecYuvData[2].u_stride;

-            }

-            break;

-

-        case M4DECODER_kOptionID_OutputFilter:

-            pFilterOption = (M4DECODER_OutputFilter*)pValue;

-            break;

-

-        case M4DECODER_kOptionID_DeblockingFilter:

-            err = M4ERR_BAD_OPTION_ID;

-            break;

-

-        default:

-            err = M4ERR_BAD_OPTION_ID;

-            break;

-    }

-    return err;

-}

-

-/**

- ************************************************************************

- * @brief   Decode video Access Units up to a target time

- * @note    Parse and decode the video until it can output a decoded image

- *          for which the composition time is equal or greater to the

- *          passed targeted time.

- *          The data are read from the reader data interface passed to

- *          M4DECODER_MPEG4_create.

- *

- * @param   context:    (IN)        Context of the decoder

- * @param   pTime:      (IN/OUT)    IN: Time to decode up to (in msec)

- *                                  OUT:Time of the last decoded frame (in msec)

- * @param   bJump:      (IN)        0 if no jump occured just before this call

- *                                  1 if a a jump has just been made

- * @return  M4NO_ERROR              there is no error

- * @return  M4ERR_PARAMETER         at least one parameter is not properly set

- * @return  M4WAR_NO_MORE_AU        there is no more access unit to decode (EOS)

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_decode(M4OSA_Context context,

-                                M4_MediaTime* pTime, M4OSA_Bool bJump,
-                                M4OSA_UInt32 tolerance) {
-

-    // Do nothing; input time stamp itself returned

-    return M4NO_ERROR;

-}

-

-/**

- ************************************************************************

- * @brief   Renders the video at the specified time.

- * @note

- * @param   context:     (IN)       Context of the decoder

- * @param   pTime:       (IN/OUT)   IN: Time to render to (in msecs)

- *                                  OUT:Time of the rendered frame (in ms)

- * @param   pOutputPlane:(OUT)      Output plane filled with decoded data

- * @param   bForceRender:(IN)       1 if the image must be rendered even it

- *                                  has been rendered already

- *                                  0 if not

- *

- * @return  M4NO_ERROR              There is no error

- * @return  M4ERR_PARAMETER         At least one parameter is not properly set

- * @return  M4ERR_STATE             State automaton is not applied

- * @return  M4ERR_ALLOC             There is no more available memory

- * @return  M4WAR_VIDEORENDERER_NO_NEW_FRAME    If the frame has already been rendered

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_render(M4OSA_Context context, M4_MediaTime* pTime,

-                                M4VIFI_ImagePlane* pOutputPlane,

-                                M4OSA_Bool bForceRender) {

-

-    M4OSA_ERR err = M4NO_ERROR;

-    M4OSA_UInt32 height;

-    M4OSA_UInt8 *p_src,*p_des;

-    M4_VideoHandler_Context*    pStreamContext =

-        (M4_VideoHandler_Context*)context;

-

-    if (pStreamContext->bYuvWithEffectSet == M4OSA_TRUE) {

-

-        p_des = pOutputPlane[0].pac_data + pOutputPlane[0].u_topleft;

-        p_src = pStreamContext->pDecYuvWithEffect[0].pac_data +

-         pStreamContext->pDecYuvWithEffect[0].u_topleft;

-

-        for (height = 0; height<pOutputPlane[0].u_height; height++) {

-            memcpy((void *)p_des, (void *)p_src, pOutputPlane[0].u_width);

-            p_des += pOutputPlane[0].u_stride;

-            p_src += pStreamContext->pDecYuvWithEffect[0].u_stride;

-        }

-

-        p_des = pOutputPlane[1].pac_data + pOutputPlane[1].u_topleft;

-        p_src = pStreamContext->pDecYuvWithEffect[1].pac_data +

-         pStreamContext->pDecYuvWithEffect[1].u_topleft;

-

-        for (height = 0; height<pOutputPlane[1].u_height; height++) {

-            memcpy((void *)p_des, (void *)p_src, pOutputPlane[1].u_width);

-            p_des += pOutputPlane[1].u_stride;

-            p_src += pStreamContext->pDecYuvWithEffect[1].u_stride;

-        }

-

-        p_des = pOutputPlane[2].pac_data + pOutputPlane[2].u_topleft;

-        p_src = pStreamContext->pDecYuvWithEffect[2].pac_data +

-         pStreamContext->pDecYuvWithEffect[2].u_topleft;

-

-        for (height = 0; height<pOutputPlane[2].u_height; height++) {

-            memcpy((void *)p_des, (void *)p_src, pOutputPlane[2].u_width);

-            p_des += pOutputPlane[2].u_stride;

-            p_src += pStreamContext->pDecYuvWithEffect[2].u_stride;

-        }

-    } else {

-

-        p_des = pOutputPlane[0].pac_data + pOutputPlane[0].u_topleft;

-        p_src = pStreamContext->pDecYuvData[0].pac_data +

-         pStreamContext->pDecYuvData[0].u_topleft;

-

-        for (height = 0; height<pOutputPlane[0].u_height; height++) {

-            memcpy((void *)p_des, (void *)p_src, pOutputPlane[0].u_width);

-            p_des += pOutputPlane[0].u_stride;

-            p_src += pStreamContext->pDecYuvData[0].u_stride;

-        }

-

-        p_des = pOutputPlane[1].pac_data + pOutputPlane[1].u_topleft;

-        p_src = pStreamContext->pDecYuvData[1].pac_data +

-         pStreamContext->pDecYuvData[1].u_topleft;

-

-        for (height = 0; height<pOutputPlane[1].u_height; height++) {

-            memcpy((void *)p_des, (void *)p_src, pOutputPlane[1].u_width);

-            p_des += pOutputPlane[1].u_stride;

-            p_src += pStreamContext->pDecYuvData[1].u_stride;

-        }

-

-        p_des = pOutputPlane[2].pac_data + pOutputPlane[2].u_topleft;

-        p_src = pStreamContext->pDecYuvData[2].pac_data +

-         pStreamContext->pDecYuvData[2].u_topleft;

-

-        for (height = 0; height<pOutputPlane[2].u_height; height++) {

-            memcpy((void *)p_des,(void *)p_src,pOutputPlane[2].u_width);

-            p_des += pOutputPlane[2].u_stride;

-            p_src += pStreamContext->pDecYuvData[2].u_stride;

-        }

-    }

-    return err;

-}

-

-/**

- ************************************************************************

- * @brief Retrieves the interface implemented by the decoder

- * @param pDecoderType        : Pointer to a M4DECODER_VideoType

- *                             (allocated by the caller)

- *                             that will be filled with the decoder type

- * @param pDecoderInterface   : Address of a pointer that will be set to

- *                              the interface implemented by this decoder.

- *                              The interface is a structure allocated by

- *                              this function and must be freed by the caller.

- *

- * @returns : M4NO_ERROR  if OK

- *            M4ERR_ALLOC if allocation failed

- ************************************************************************

-*/

-M4OSA_ERR M4DECODER_NULL_getInterface (M4DECODER_VideoType *pDecoderType,

-                            M4DECODER_VideoInterface **pDecoderInterface) {

-

-    *pDecoderInterface =

-        (M4DECODER_VideoInterface*)M4OSA_32bitAlignedMalloc(

-         sizeof(M4DECODER_VideoInterface),

-         M4DECODER_MPEG4, (M4OSA_Char *)"M4DECODER_VideoInterface");

-

-    if (M4OSA_NULL == *pDecoderInterface) {

-        return M4ERR_ALLOC;

-    }

-

-    *pDecoderType = M4DECODER_kVideoTypeYUV420P;

-

-    (*pDecoderInterface)->m_pFctCreate    = M4DECODER_NULL_create;

-    (*pDecoderInterface)->m_pFctDestroy   = M4DECODER_NULL_destroy;

-    (*pDecoderInterface)->m_pFctGetOption = M4DECODER_NULL_getOption;

-    (*pDecoderInterface)->m_pFctSetOption = M4DECODER_NULL_setOption;

-    (*pDecoderInterface)->m_pFctDecode    = M4DECODER_NULL_decode;

-    (*pDecoderInterface)->m_pFctRender    = M4DECODER_NULL_render;

-

-    return M4NO_ERROR;

-}

diff --git a/libvideoeditor/vss/src/M4PCMR_CoreReader.c b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
deleted file mode 100755
index 19f07dd..0000000
--- a/libvideoeditor/vss/src/M4PCMR_CoreReader.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file    M4PCM_PCMReader.c
- * @brief   PCM reader implementation
- * @note    This file implements functions of the PCM reader
- ************************************************************************
- */
-#include "M4OSA_CharStar.h"
-#include "M4PCMR_CoreReader.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_CharStar.h"
-/**
- ******************************************************************************
- * PCM reader version numbers
- ******************************************************************************
- */
-/* CHANGE_VERSION_HERE */
-#define M4PCMR_VERSION_MAJOR 1
-#define M4PCMR_VERSION_MINOR 0
-#define M4PCMR_VERSION_REVISION 0
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
- *                             M4OSA_FileReaderPointer* pFileFunction)
- * @brief   This function opens a PCM file
- * @note    This function :
- *          - opens a PCM file
- *          - initializes PCM context,
- *          - verifies PCM file format
- *          - Fill decoder config structure
- *          - Changes state of the reader in 'Opening'
- * @param   pContext: (OUT) Pointer on the PCM Reader context
- * @param   pUrl: (IN) Name of the PCM file
- * @param   pFileFunctions: (IN) Pointer on the file access functions
- * @return  M4NO_ERROR                      there is no error during the opening
- * @return  M4ERR_PARAMETER                 pContext and/or pUrl and/or pFileFunction is NULL
- * @return  M4ERR_ALLOC                     there is no more memory available
- * @return  M4ERR_FILE_NOT_FOUND            the file cannot be found
- * @return  M4PCMC_ERR_PCM_NOT_COMPLIANT    the file does not seem to be compliant, no RIFF,
- *                                             or lack of any mandatory chunk.
- * @return  M4PCMC_ERR_PCM_NOT_SUPPORTED    the PCM format of this file is not supported by the
- *                                           reader
- * @return  Any M4OSA_FILE errors           see OSAL File specification for detailed errors
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
-                             M4OSA_FileReadPointer* pFileFunction)
-{
-    M4OSA_ERR       err;
-    M4PCMR_Context *context;
-    M4OSA_Char*        pTempURL;
-    M4OSA_Char        value[6];
-
-    /* Check parameters */
-    if((M4OSA_NULL == pContext)|| (M4OSA_NULL == pUrl) ||(M4OSA_NULL == pFileFunction))
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Allocates the context */
-    context = M4OSA_NULL;
-    context = (M4PCMR_Context *)M4OSA_32bitAlignedMalloc(sizeof(M4PCMR_Context), M4WAV_READER,
-         (M4OSA_Char *)"M4PCMR_openRead");
-    if (M4OSA_NULL == context)
-    {
-        return M4ERR_ALLOC;
-    }
-    *pContext = (M4OSA_Context)context;
-
-    /* Initialize the context */
-    context->m_offset = 0;
-
-    context->m_state            = M4PCMR_kInit;
-    context->m_microState       = M4PCMR_kInit;
-    context->m_pFileReadFunc    = M4OSA_NULL;
-    context->m_fileContext      = M4OSA_NULL;
-    context->m_pAuBuffer        = M4OSA_NULL;
-    context->m_pDecoderSpecInfo = M4OSA_NULL;
-
-    /* Set sample frequency */
-    pTempURL = (M4OSA_Char*)pUrl + (strlen((const char *)pUrl)-11);
-    M4OSA_chrNCopy(value, pTempURL, 5);
-    M4OSA_chrGetUInt32(pTempURL, &(context->m_decoderConfig.SampleFrequency),
-         M4OSA_NULL, M4OSA_kchrDec);
-
-    /* Set number of channels */
-    pTempURL += 6;
-    M4OSA_chrNCopy(value, pTempURL, 1);
-    M4OSA_chrGetUInt16(pTempURL, &(context->m_decoderConfig.nbChannels),
-         M4OSA_NULL, M4OSA_kchrDec);
-
-    M4OSA_chrNCopy(pUrl,pUrl, (strlen((const char *)pUrl)-12));
-    /* Open the file */
-    context->m_fileContext = M4OSA_NULL;
-    err = pFileFunction->openRead(&(context->m_fileContext), pUrl, M4OSA_kFileRead);
-    if(M4NO_ERROR != err)
-    {
-        return err;
-    }
-    context->m_decoderConfig.BitsPerSample = 16;
-    context->m_decoderConfig.AvgBytesPerSec = context->m_decoderConfig.SampleFrequency * 2 \
-        * context->m_decoderConfig.nbChannels;
-    err = pFileFunction->getOption(context->m_fileContext, M4OSA_kFileReadGetFileSize,
-         (M4OSA_DataOption*)&(context->m_decoderConfig.DataLength));
-    if(M4NO_ERROR != err)
-    {
-        return err;
-    }
-    context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;  // Raw PCM.  Hence, get a
-                                                                        // chunk of data
-
-    if(context->m_decoderConfig.SampleFrequency == 8000)
-    {
-        /* AMR case, no pb */
-        context->m_blockSize = context->m_decoderConfig.nbChannels *\
-             (context->m_decoderConfig.SampleFrequency / 50) * \
-                (context->m_decoderConfig.BitsPerSample / 8);
-    }
-    if(context->m_decoderConfig.SampleFrequency == 16000)
-    {
-        /* AAC case, we can't read only 20 ms blocks */
-        context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;
-    }
-    context->m_dataStartOffset = 0;
-    context->m_pFileReadFunc = pFileFunction;
-
-    context->m_pAuBuffer = (M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc(context->m_blockSize, M4WAV_READER,
-         (M4OSA_Char *)"Core PCM reader Access Unit");
-    if (M4OSA_NULL == context->m_pAuBuffer)
-    {
-        err = M4ERR_ALLOC;
-        goto cleanup;
-    }
-
-    /* Change state */
-    context->m_state = M4PCMR_kOpening;
-
-    return M4NO_ERROR;
-
-cleanup:
-
-    /* Close the file */
-    if(context->m_pFileReadFunc != M4OSA_NULL)
-        context->m_pFileReadFunc->closeRead(context->m_fileContext);
-
-    /* Free internal context */
-    free(context);
-    *pContext = M4OSA_NULL;
-
-    return err;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
- * @brief   This function get the (unique) stream of a PCM file
- * @note    This function :
- *          - Allocates and fills the decoder specific info structure
- *          - Fills decoder specific infos structure
- *          - Fills pStreamDesc structure allocated by the caller
- * @param   context: (IN/OUT) PCM Reader context
- * @param   pStreamDesc: (IN) Stream Description context
- * @return  M4NO_ERROR          there is no error
- * @return  M4ERR_PARAMETER     at least one parameter is NULL
- * @return  M4ERR_ALLOC         there is no more memory available
- * @return  M4ERR_STATE         this function cannot be called now
- * @return  Any M4OSA_FILE      errors see OSAL File specification for detailed errors
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
-{
-    M4PCMR_Context *c = (M4PCMR_Context *)context;
-
-    /* Check parameters */
-    if((M4OSA_NULL == context)|| (M4OSA_NULL == pStreamDesc))
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    if (c->m_state == M4PCMR_kOpening_streamRetrieved)
-    {
-        return M4WAR_NO_MORE_STREAM;
-    }
-    /* Check Reader's m_state */
-    if(c->m_state != M4PCMR_kOpening)
-    {
-        return M4ERR_STATE;
-    }
-
-    /* Only one stream is contained in PCM file */
-    pStreamDesc->streamID = 1;
-    /* Not used */
-    pStreamDesc->profileLevel = 0;
-    pStreamDesc->decoderSpecificInfoSize = sizeof(M4PCMC_DecoderSpecificInfo);
-
-    /* Allocates decoder specific info structure */
-    pStreamDesc->decoderSpecificInfo = M4OSA_NULL;
-    pStreamDesc->decoderSpecificInfo =
-        (M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc( sizeof(M4PCMC_DecoderSpecificInfo), M4WAV_READER,
-             (M4OSA_Char *)"M4PCMR_getNextStream");
-    if(pStreamDesc->decoderSpecificInfo == M4OSA_NULL)
-    {
-        return M4ERR_ALLOC;
-    }
-    /* Fill decoderSpecificInfo structure, with decoder config structure filled in 'openread'
-         function */
-    memcpy((void *)pStreamDesc->decoderSpecificInfo,
-         (void *)&c->m_decoderConfig, sizeof(M4PCMC_DecoderSpecificInfo));
-
-    /* Fill other fields of pStreamDesc structure */
-    pStreamDesc->timeScale = 1000;
-    pStreamDesc->duration = (M4OSA_Time)(((M4OSA_Double)(c->m_decoderConfig.DataLength)\
-         / (M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec))*pStreamDesc->timeScale);
-    pStreamDesc->averageBitrate = c->m_decoderConfig.AvgBytesPerSec * 8;/* in bits, multiply by 8*/
-    pStreamDesc->maxBitrate = pStreamDesc->averageBitrate; /* PCM stream has constant bitrate */
-
-    /* Determines Stream type */
-    switch(c->m_decoderConfig.BitsPerSample)
-    {
-        case 8:
-            switch(c->m_decoderConfig.nbChannels)
-            {
-                case 1:
-                    pStreamDesc->streamType = M4SYS_kPCM_8bitsU;
-                    break;
-//                case 2:
-//                    pStreamDesc->streamType = M4SYS_kPCM_8bitsS; /* ??? 8bits stereo not
-                                                                  //   defined ? */
-//                    break;
-                default:
-                    pStreamDesc->streamType = M4SYS_kAudioUnknown;
-            }
-            break;
-
-        case 16:
-            switch(c->m_decoderConfig.nbChannels)
-            {
-                case 1:
-                    pStreamDesc->streamType = M4SYS_kPCM_16bitsU;
-                    break;
-                case 2:
-                    pStreamDesc->streamType = M4SYS_kPCM_16bitsS;
-                    break;
-                default:
-                    pStreamDesc->streamType = M4SYS_kAudioUnknown;
-            }
-            break;
-
-        default:
-            pStreamDesc->streamType = M4SYS_kAudioUnknown;
-    }
-
-    c->m_pDecoderSpecInfo = pStreamDesc->decoderSpecificInfo;
-
-    c->m_state = M4PCMR_kOpening_streamRetrieved;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
- * @brief   This function starts reading the unique stream of a PCM file
- * @note    This function :
- *          - Verifies that the current reader's state allows to start reading a stream
- *          - Check that provided StreamId is correct (always true, only one stream...)
- *            In the player application, a StreamId table is initialized as follow:
- *              M4SYS_StreamID pStreamID[2]={1,0};
- *          - Change state of the reader in 'Reading'
- * @param   context: (IN/OUT) PCM Reader context
- * @param   streamID: (IN) Stream selection
- * @return  M4NO_ERROR          there is no error
- * @return  M4ERR_PARAMETER     at least one parameter is NULL
- * @return  M4ERR_STATE         this function cannot be called now
- * @return  M4ERR_BAD_STREAM_ID at least one of the streamID does not exist
- *          (should never happen if table pStreamID is correctly initialized as above)
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
-{
-    M4PCMR_Context *c = (M4PCMR_Context *)context;
-
-    /* Check parameters */
-    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamIDs))
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check Reader's state */
-    if(c->m_state != M4PCMR_kOpening_streamRetrieved)
-    {
-        return M4ERR_STATE;
-    }
-
-    /* Check pStreamID and if they're OK, change reader's state */
-    if(pStreamIDs[0] == 1 || pStreamIDs[0] == 0)
-    /* First and unique stream contained in PCM file */
-    {
-        c->m_state = M4PCMR_kReading;
-        c->m_microState = M4PCMR_kReading;
-    }
-    else
-    {
-        return M4ERR_BAD_STREAM_ID;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
- * @brief   This function reads the next AU contained in the PCM file
- * @note    This function :
- *          - Verifies that the current reader's state allows to read an AU
- *          - Allocates memory to store read AU
- *          - Read data from file and store them into previously allocated memory
- *          - Fill AU structure fileds (CTS...)
- *          - Change state of the reader in 'Reading' (not useful...)
- *          - Change Micro state 'Reading' in M4PCMR_kReading_nextAU
- *            (AU is read and can be deleted)
- *          - Check if the last AU has been read or if we're about to read it
- * @param   context: (IN/OUT) PCM Reader context
- * @param   streamID: (IN) Stream selection
- * @param   pAU: (IN/OUT) Acces Unit Structure
- * @return  M4NO_ERROR          there is no error
- * @return  M4ERR_PARAMETER     at least one parameter is NULL
- * @return  M4ERR_ALLOC         there is no more memory available
- * @return  M4ERR_STATE         this function cannot be called now
- * @return  M4M4WAR_NO_DATA_YET there is no enough data in the file to provide a new access unit.
- * @return  M4WAR_END_OF_STREAM There is no more access unit in the stream,
- *                              or the sample number is bigger the maximum one.
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
-{
-    M4PCMR_Context *c = (M4PCMR_Context *)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 size_read;
-
-    /* Check parameters */
-    if((M4OSA_NULL == context) || (M4OSA_NULL == pAU))
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check Reader's state */
-    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading)
-    {
-        return M4ERR_STATE;
-    }
-
-    /* Allocates AU dataAdress */
-    pAU->dataAddress = c->m_pAuBuffer;
-    size_read        = c->m_blockSize;
-
-    if((c->m_offset + size_read) >= c->m_decoderConfig.DataLength)
-    {
-        size_read = c->m_decoderConfig.DataLength - c->m_offset;
-    }
-
-    /* Read data in file, and copy it to AU Structure */
-    err = c->m_pFileReadFunc->readData(c->m_fileContext, (M4OSA_MemAddr8)pAU->dataAddress,
-         (M4OSA_UInt32 *)&size_read);
-    if(M4NO_ERROR != err)
-    {
-        return err;
-    }
-
-    /* Calculates the new m_offset, used to determine whether we're at end of reading or not */
-    c->m_offset = c->m_offset + size_read;
-
-    /* Fill others parameters of AU structure */
-    pAU->CTS =
-         (M4OSA_Time)(((M4OSA_Double)c->m_offset/(M4OSA_Double)c->m_decoderConfig.AvgBytesPerSec)\
-            *1000);
-    pAU->DTS = pAU->CTS;
-
-    pAU->attribute  = 0;
-    pAU->frag       = M4OSA_NULL;
-    pAU->nbFrag     = 0;
-    pAU->stream     = M4OSA_NULL;
-    pAU->size       = size_read;
-
-    /* Change states */
-    c->m_state = M4PCMR_kReading; /* Not changed ... */
-    c->m_microState = M4PCMR_kReading_nextAU; /* AU is read and can be deleted */
-
-    /* Check if there is another AU to read */
-    /* ie: if decoded nb of bytes = nb of bytes to decode,
-         it means there is no more AU to decode */
-    if(c->m_offset >= c->m_decoderConfig.DataLength)
-    {
-        return M4WAR_NO_MORE_AU;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
- * @brief   This function frees the AU provided in parameter
- * @note    This function :
- *          - Verifies that the current reader's state allows to free an AU
- *          - Free dataAddress field of AU structure
- *          - Change state of the reader in 'Reading' (not useful...)
- *          - Change Micro state 'Reading' in M4PCMR_kReading (another AU can be read)
- * @param   context: (IN/OUT) PCM Reader context
- * @param   streamID: (IN) Stream selection
- * @param   pAU: (IN) Acces Unit Structure
- * @return  M4NO_ERROR  there is no error
- * @return  M4ERR_PARAMETER at least one parameter is NULL
- * @return  M4ERR_STATE this function cannot be called now
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
-{
-    M4PCMR_Context *c = (M4PCMR_Context *)context;
-
-    /* Check parameters */
-    if((M4OSA_NULL == context ) || (M4OSA_NULL == pAU))
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check Reader's state */
-    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading_nextAU)
-    {
-        return M4ERR_STATE;
-    }
-
-    pAU->dataAddress = M4OSA_NULL;
-
-    /* Change states */
-    c->m_state = M4PCMR_kReading; /* Not changed ... */
-    c->m_microState = M4PCMR_kReading; /* AU is deleted, another AU can be read */
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID,
-                         M4OSA_Time time, M4SYS_seekAccessMode seekAccessMode,
-                         M4OSA_Time* pObtainCTS[])
- * @brief   This function seeks into the PCM file at the provided time
- * @note    This function :
- *          - Verifies that the current reader's state allows to seek
- *          - Determines from provided time m_offset to seek in file
- *          - If m_offset is correct, seek in file
- *          - Update new m_offset in PCM reader context
- * @param   context: (IN/OUT) PCM Reader context
- * @param   pStreamID: (IN) Stream selection (not used, only 1 stream)
- * @param   time: (IN) Targeted time
- * @param   seekMode: (IN) Selects the seek access mode
- * @param   pObtainCTS[]: (OUT) Returned Time (not used)
- * @return  M4NO_ERROR              there is no error
- * @return  M4ERR_PARAMETER         at least one parameter is NULL
- * @return  M4ERR_ALLOC             there is no more memory available
- * @return  M4ERR_STATE             this function cannot be called now
- * @return  M4WAR_INVALID_TIME      Specified time is not reachable
- * @param   M4ERR_NOT_IMPLEMENTED   This seek mode is not implemented yet
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
-                      M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS)
-{
-    M4PCMR_Context *c = (M4PCMR_Context *)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 offset;
-    M4OSA_UInt32 alignment;
-    M4OSA_UInt32 size_read;
-
-    /* Check parameters */
-    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamID))
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check Reader's state */
-    if(c->m_state != M4PCMR_kOpening_streamRetrieved && c->m_state != M4PCMR_kReading)
-    {
-        return M4ERR_STATE;
-    }
-
-    switch(seekAccessMode)
-    {
-        case M4SYS_kBeginning:
-            /* Determine m_offset from time*/
-            offset =
-                (M4OSA_UInt32)(time * ((M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec) / 1000));
-            /** check the alignment on sample boundary */
-            alignment = c->m_decoderConfig.nbChannels*c->m_decoderConfig.BitsPerSample/8;
-            if (offset%alignment != 0)
-            {
-                offset -= offset%alignment;
-            }
-            /*add the header offset*/
-            offset += c->m_dataStartOffset;
-            /* If m_offset is over file size -> Invalid time */
-            if (offset > (c->m_dataStartOffset + c->m_decoderConfig.DataLength))
-            {
-                return M4WAR_INVALID_TIME;
-            }
-            else
-            {
-                /* Seek file */
-                size_read = offset;
-                err = c->m_pFileReadFunc->seek(c->m_fileContext, M4OSA_kFileSeekBeginning,
-                    (M4OSA_FilePosition *) &size_read);
-                if(M4NO_ERROR != err)
-                {
-                    return err;
-                }
-                /* Update m_offset in M4PCMR_context */
-                c->m_offset = offset - c->m_dataStartOffset;
-            }
-            break;
-
-        default:
-            return M4ERR_NOT_IMPLEMENTED;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
- * @brief   This function closes PCM file, and frees context
- * @note    This function :
- *          - Verifies that the current reader's state allows close the PCM file
- *          - Closes the file
- *          - Free structures
- * @param   context: (IN/OUT) PCM Reader context
- * @return  M4NO_ERROR              there is no error
- * @return  M4ERR_PARAMETER         at least one parameter is NULL
- * @return  M4ERR_STATE             this function cannot be called now
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
-{
-    M4PCMR_Context *c = (M4PCMR_Context *)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check parameters */
-    if(M4OSA_NULL == context)
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    if(c->m_pDecoderSpecInfo != M4OSA_NULL)
-    {
-        free(c->m_pDecoderSpecInfo);
-    }
-
-    /* Check Reader's state */
-    if(c->m_state != M4PCMR_kReading)
-    {
-        return M4ERR_STATE;
-    }
-    else if(c->m_microState == M4PCMR_kReading_nextAU)
-    {
-        return M4ERR_STATE;
-    }
-
-    if (M4OSA_NULL != c->m_pAuBuffer)
-    {
-        free(c->m_pAuBuffer);
-    }
-
-    /* Close the file */
-    if (M4OSA_NULL != c->m_pFileReadFunc)
-    {
-        err = c->m_pFileReadFunc->closeRead(c->m_fileContext);
-    }
-
-    /* Free internal context */
-    if (M4OSA_NULL != c)
-    {
-        free(c);
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
- *                                M4OSA_DataOption* pValue)
- * @brief   This function get option of the PCM Reader
- * @note    This function :
- *          - Verifies that the current reader's state allows to get an option
- *          - Return corresponding option value
- * @param   context: (IN/OUT) PCM Reader context
- * @param   optionID: (IN) ID of the option to get
- * @param   pValue: (OUT) Variable where the option value is returned
- * @return  M4NO_ERROR              there is no error.
- * @return  M4ERR_PARAMETER         at least one parameter is NULL.
- * @return  M4ERR_BAD_OPTION_ID     the optionID is not a valid one.
- * @return  M4ERR_STATE             this option is not available now.
- * @return  M4ERR_NOT_IMPLEMENTED   this option is not implemented
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
-                             M4OSA_DataOption* pValue)
-{
-    M4PCMR_Context *c =(M4PCMR_Context *)context;
-
-    /* Check parameters */
-    if(M4OSA_NULL == context)
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check reader's state */
-    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
-         && (c->m_state != M4PCMR_kReading))
-    {
-        return M4ERR_STATE;
-    }
-
-    /* Depend of the OptionID, the value to return is different */
-    switch(optionID)
-    {
-        case M4PCMR_kPCMblockSize:
-            *pValue = &c->m_blockSize;
-            break;
-
-        default:
-            return M4ERR_BAD_OPTION_ID;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
- *                                 M4OSA_DataOption Value)
- * @brief   This function set option of the PCM Reader
- * @note    This function :
- *          - Verifies that the current reader's state allows to set an option
- *          - Set corresponding option value
- * @param   context: (IN/OUT) PCM Reader context
- * @param   optionID: (IN) ID of the option to get
- * @param   Value: (IN) Variable where the option value is stored
- * @return  M4NO_ERROR              there is no error.
- * @return  M4ERR_PARAMETER         at least one parameter is NULL.
- * @return  M4ERR_BAD_OPTION_ID     the optionID is not a valid one.
- * @return  M4ERR_STATE             this option is not available now.
- * @return  M4ERR_NOT_IMPLEMENTED   this option is not implemented
- ************************************************************************
- */
-M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID, M4OSA_DataOption Value)
-{
-    M4PCMR_Context *c =(M4PCMR_Context *)context;
-
-    /* Check parameters */
-    if(context == M4OSA_NULL)
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check reader's state */
-    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
-         && (c->m_state != M4PCMR_kReading))
-    {
-        return M4ERR_STATE;
-    }
-
-    /* Depend of the OptionID, the value to set is different */
-    switch(optionID)
-    {
-        case M4PCMR_kPCMblockSize:
-            c->m_blockSize = (M4OSA_UInt32)(uintptr_t)Value;
-            break;
-
-        default:
-            return M4ERR_BAD_OPTION_ID;
-    }
-
-    return M4NO_ERROR;
-}
-
-/*********************************************************/
-M4OSA_ERR M4PCMR_getVersion (M4_VersionInfo *pVersion)
-/*********************************************************/
-{
-    M4OSA_TRACE1_1("M4PCMR_getVersion called with pVersion: 0x%x", pVersion);
-    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
-         "pVersion is NULL in M4PCMR_getVersion");
-
-    pVersion->m_major = M4PCMR_VERSION_MAJOR;
-    pVersion->m_minor = M4PCMR_VERSION_MINOR;
-    pVersion->m_revision = M4PCMR_VERSION_REVISION;
-
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_API.c b/libvideoeditor/vss/src/M4PTO3GPP_API.c
deleted file mode 100755
index 042ffb7..0000000
--- a/libvideoeditor/vss/src/M4PTO3GPP_API.c
+++ /dev/null
@@ -1,1928 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4PTO3GPP_API.c
- * @brief   Picture to 3gpp Service implementation.
- * @note
- ******************************************************************************
-*/
-
-/*16 bytes signature to be written in the generated 3gp files */
-#define M4PTO3GPP_SIGNATURE     "NXP-SW : PTO3GPP"
-
-/****************/
-/*** Includes ***/
-/****************/
-
-/**
- *  Our header */
-#include "M4PTO3GPP_InternalTypes.h"
-#include "M4PTO3GPP_API.h"
-
-/**
- *  Our errors */
-#include "M4PTO3GPP_ErrorCodes.h"
-
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-#include "VideoEditorVideoEncoder.h"
-#endif
-
-
-/**
- *  OSAL headers */
-#include "M4OSA_Memory.h"       /* OSAL memory management */
-#include "M4OSA_Debug.h"        /* OSAL debug management */
-
-
-/************************/
-/*** Various Magicals ***/
-/************************/
-
-#define M4PTO3GPP_WRITER_AUDIO_STREAM_ID                1
-#define M4PTO3GPP_WRITER_VIDEO_STREAM_ID                2
-#define M4PTO3GPP_QUANTIZER_STEP                        4       /**< Quantizer step */
-#define M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL            0xFF    /**< No specific profile and
-                                                                     level */
-#define M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE           8000    /**< AMR */
-#define M4PTO3GPP_BITRATE_REGULATION_CTS_PERIOD_IN_MS   500     /**< MAGICAL */
-#define M4PTO3GPP_MARGE_OF_FILE_SIZE                    25000   /**< MAGICAL */
-/**
- ******************************************************************************
- * define   AMR 12.2 kbps silence frame
- ******************************************************************************
-*/
-#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE     32
-#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_DURATION 20
-const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_122_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE]=
-{ 0x3C, 0x91, 0x17, 0x16, 0xBE, 0x66, 0x78, 0x00, 0x00, 0x01, 0xE7, 0xAF,
-  0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
-#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
-#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 20
-const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_048_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
-{ 0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00 };
-
-/***************************/
-/*** "Private" functions ***/
-/***************************/
-static M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
-
-/****************************/
-/*** "External" functions ***/
-/****************************/
-extern M4OSA_ERR M4WRITER_3GP_getInterfaces(M4WRITER_OutputFileType* pType,
-                                            M4WRITER_GlobalInterface** SrcGlobalInterface,
-                                            M4WRITER_DataInterface** SrcDataInterface);
-extern M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
-                                            M4READER_GlobalInterface **pRdrGlobalInterface,
-                                            M4READER_DataInterface **pRdrDataInterface);
-extern M4OSA_ERR M4READER_3GP_getInterfaces(M4READER_MediaType *pMediaType,
-                                            M4READER_GlobalInterface **pRdrGlobalInterface,
-                                            M4READER_DataInterface **pRdrDataInterface);
-
-/****************************/
-/*** "Static" functions ***/
-/****************************/
-static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(
-                                    M4WRITER_DataInterface* pWriterDataIntInterface,
-                                    M4WRITER_Context* pWriterContext,
-                                    M4SYS_AccessUnit* pWriterAudioAU,
-                                    M4OSA_Time mtIncCts);
-static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(
-                                   M4WRITER_DataInterface* pWriterDataIntInterface,
-                                   M4WRITER_Context* pWriterContext,
-                                   M4SYS_AccessUnit* pWriterAudioAU,
-                                   M4OSA_Time mtIncCts);
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
- * @brief   Get the M4PTO3GPP version.
- * @note    Can be called anytime. Do not need any context.
- * @param   pVersionInfo        (OUT) Pointer to a version info structure
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
- ******************************************************************************
-*/
-
-/*********************************************************/
-M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo)
-/*********************************************************/
-{
-    M4OSA_TRACE3_1("M4PTO3GPP_GetVersion called with pVersionInfo=0x%x", pVersionInfo);
-
-    /**
-     *  Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pVersionInfo),M4ERR_PARAMETER,
-            "M4PTO3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
-
-    pVersionInfo->m_major       = M4PTO3GPP_VERSION_MAJOR;
-    pVersionInfo->m_minor       = M4PTO3GPP_VERSION_MINOR;
-    pVersionInfo->m_revision    = M4PTO3GPP_VERSION_REVISION;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
- * @brief   Initializes the M4PTO3GPP (allocates an execution context).
- * @note
- * @param   pContext            (OUT) Pointer on the M4PTO3GPP context to allocate
- * @param   pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
- * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
- * @return  M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
-*/
-/*********************************************************/
-M4OSA_ERR M4PTO3GPP_Init(   M4PTO3GPP_Context* pContext,
-                            M4OSA_FileReadPointer* pFileReadPtrFct,
-                            M4OSA_FileWriterPointer* pFileWritePtrFct)
-/*********************************************************/
-{
-    M4PTO3GPP_InternalContext *pC;
-    M4OSA_UInt32 i;
-
-    M4OSA_TRACE3_1("M4PTO3GPP_Init called with pContext=0x%x", pContext);
-
-    /**
-     *  Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-            "M4PTO3GPP_Init: pContext is M4OSA_NULL");
-
-    /**
-     *  Allocate the M4PTO3GPP context and return it to the user */
-    pC = (M4PTO3GPP_InternalContext*)M4OSA_32bitAlignedMalloc(sizeof(M4PTO3GPP_InternalContext), M4PTO3GPP,
-        (M4OSA_Char *)"M4PTO3GPP_InternalContext");
-    *pContext = pC;
-    if (M4OSA_NULL == pC)
-    {
-        M4OSA_TRACE1_0("M4PTO3GPP_Step(): unable to allocate M4PTO3GPP_InternalContext,\
-                       returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    /**
-     *  Init the context. All pointers must be initialized to M4OSA_NULL because CleanUp()
-        can be called just after Init(). */
-    pC->m_State = M4PTO3GPP_kState_CREATED;
-    pC->m_VideoState = M4PTO3GPP_kStreamState_NOSTREAM;
-    pC->m_AudioState = M4PTO3GPP_kStreamState_NOSTREAM;
-
-    /**
-     *  Reader stuff */
-    pC->m_pReaderAudioAU        = M4OSA_NULL;
-    pC->m_pReaderAudioStream    = M4OSA_NULL;
-
-    /**
-     *  Writer stuff */
-    pC->m_pEncoderHeader        = M4OSA_NULL;
-    pC->m_pWriterVideoStream    = M4OSA_NULL;
-    pC->m_pWriterAudioStream    = M4OSA_NULL;
-    pC->m_pWriterVideoStreamInfo= M4OSA_NULL;
-    pC->m_pWriterAudioStreamInfo= M4OSA_NULL;
-
-    /**
-     *  Contexts of the used modules  */
-    pC->m_pAudioReaderContext    = M4OSA_NULL;
-    pC->m_p3gpWriterContext  = M4OSA_NULL;
-    pC->m_pMp4EncoderContext = M4OSA_NULL;
-    pC->m_eEncoderState = M4PTO3GPP_kNoEncoder;
-
-    /**
-     *  Interfaces of the used modules */
-    pC->m_pReaderGlobInt    = M4OSA_NULL;
-    pC->m_pReaderDataInt    = M4OSA_NULL;
-    pC->m_pWriterGlobInt    = M4OSA_NULL;
-    pC->m_pWriterDataInt    = M4OSA_NULL;
-    pC->m_pEncoderInt       = M4OSA_NULL;
-    pC->m_pEncoderExternalAPI = M4OSA_NULL;
-    pC->m_pEncoderUserData = M4OSA_NULL;
-
-    /**
-     * Fill the OSAL file function set */
-    pC->pOsalFileRead = pFileReadPtrFct;
-    pC->pOsalFileWrite = pFileWritePtrFct;
-
-    /**
-     *  Video rate control stuff */
-    pC->m_mtCts             = 0.0F;
-    pC->m_mtNextCts         = 0.0F;
-    pC->m_mtAudioCts        = 0.0F;
-    pC->m_AudioOffSet       = 0.0F;
-    pC->m_dLastVideoRegulCts= 0.0F;
-    pC->m_PrevAudioCts      = 0.0F;
-    pC->m_DeltaAudioCts     = 0.0F;
-
-    pC->m_MaxFileSize       = 0;
-    pC->m_CurrentFileSize   = 0;
-
-    pC->m_IsLastPicture         = M4OSA_FALSE;
-    pC->m_bAudioPaddingSilence  = M4OSA_FALSE;
-    pC->m_bLastInternalCallBack = M4OSA_FALSE;
-    pC->m_NbCurrentFrame        = 0;
-
-    pC->pSavedPlane = M4OSA_NULL;
-    pC->uiSavedDuration = 0;
-
-    M4OSA_TRACE3_0("M4PTO3GPP_Init(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
- * @brief   Set the M4PTO3GPP input and output files.
- * @note    It opens the input file, but the output file may not be created yet.
- * @param   pContext            (IN) M4PTO3GPP context
- * @param   pParams             (IN) Pointer to the parameters for the PTO3GPP.
- * @note    The pointed structure can be de-allocated after this function returns because
- *          it is internally copied by the PTO3GPP
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return  M4ERR_STATE:        M4PTO3GPP is not in an appropriate state for this function to be
-                                 called
- * @return  M4ERR_ALLOC:        There is no more available memory
- * @return  ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
- *                              size parameter is incompatible with H263 encoding
- * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT       The output video format
-                                                            parameter is undefined
- * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE      The output video bit-rate parameter
-                                                            is undefined
- * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE   The output video frame size parameter
-                                                            is undefined
- * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE          The output file size parameter
-                                                            is undefined
- * @return  ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING             The output audio padding parameter
-                                                            is undefined
- * @return  ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE    The input audio file contains
-                                                            a track format not handled by PTO3GPP
- ******************************************************************************
-*/
-/*********************************************************/
-M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams)
-/*********************************************************/
-{
-    M4PTO3GPP_InternalContext   *pC = (M4PTO3GPP_InternalContext*)(pContext);
-    M4OSA_ERR                   err = M4NO_ERROR;
-
-    M4READER_MediaFamily    mediaFamily;
-    M4_StreamHandler*       pStreamHandler;
-    M4READER_MediaType      readerMediaType;
-
-    M4OSA_TRACE2_2("M4PTO3GPP_Open called with pContext=0x%x, pParams=0x%x", pContext, pParams);
-
-    /**
-     *  Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER, \
-                    "M4PTO3GPP_Open: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams),  M4ERR_PARAMETER, \
-                    "M4PTO3GPP_Open: pParams is M4OSA_NULL");
-
-    /**
-     *  Check parameters correctness */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackFct),
-               M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackFct is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackCtxt),
-                M4ERR_PARAMETER,
-                 "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackCtxt is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pOutput3gppFile),
-                M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pOutput3gppFile is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pTemporaryFile),
-                M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pTemporaryFile is M4OSA_NULL");
-
-    /**
-     * Video Format */
-    if( (M4VIDEOEDITING_kH263 != pParams->OutputVideoFormat) &&
-        (M4VIDEOEDITING_kMPEG4 != pParams->OutputVideoFormat) &&
-        (M4VIDEOEDITING_kH264 != pParams->OutputVideoFormat)) {
-        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video format");
-        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-     }
-
-     /**
-     * Video Bitrate */
-    if(!((M4VIDEOEDITING_k16_KBPS       == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k24_KBPS       == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k32_KBPS       == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k48_KBPS       == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k64_KBPS       == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k96_KBPS       == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k128_KBPS      == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k192_KBPS      == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k256_KBPS      == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k288_KBPS      == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k384_KBPS      == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k512_KBPS      == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k800_KBPS      == pParams->OutputVideoBitrate) ||
-         /*+ New Encoder bitrates */
-         (M4VIDEOEDITING_k2_MBPS        == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k5_MBPS        == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_k8_MBPS        == pParams->OutputVideoBitrate) ||
-         (M4VIDEOEDITING_kVARIABLE_KBPS == pParams->OutputVideoBitrate))) {
-        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video bitrate");
-        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
-    }
-
-    /**
-     * Video frame size */
-    if (!((M4VIDEOEDITING_kSQCIF == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_kQQVGA == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_kQCIF == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_kQVGA == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_kCIF  == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_kVGA  == pParams->OutputVideoFrameSize) ||
-
-          (M4VIDEOEDITING_kNTSC == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_kWVGA == pParams->OutputVideoFrameSize) ||
-
-          (M4VIDEOEDITING_k640_360 == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_k854_480 == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_k1280_720 == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_k1080_720 == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_k960_720 == pParams->OutputVideoFrameSize) ||
-          (M4VIDEOEDITING_k1920_1080 == pParams->OutputVideoFrameSize))) {
-        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video frame size");
-        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
-    }
-
-    /**
-     * Maximum size of the output 3GPP file */
-    if (!((M4PTO3GPP_k50_KB     == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k75_KB     == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k100_KB    == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k150_KB    == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k200_KB    == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k300_KB    == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k400_KB    == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_k500_KB    == pParams->OutputFileMaxSize) ||
-          (M4PTO3GPP_kUNLIMITED == pParams->OutputFileMaxSize))) {
-        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output 3GPP file size");
-        return ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE;
-    }
-
-    /* Audio padding */
-    if (M4OSA_NULL != pParams->pInputAudioTrackFile) {
-        if ((!( (M4PTO3GPP_kAudioPaddingMode_None   == pParams->AudioPaddingMode) ||
-                (M4PTO3GPP_kAudioPaddingMode_Silence== pParams->AudioPaddingMode) ||
-                (M4PTO3GPP_kAudioPaddingMode_Loop   == pParams->AudioPaddingMode)))) {
-            M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined audio padding");
-            return ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING;
-        }
-    }
-
-    /**< Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
-    if ((M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat) &&
-        (M4VIDEOEDITING_kSQCIF != pParams->OutputVideoFrameSize) &&
-        (M4VIDEOEDITING_kQCIF != pParams->OutputVideoFrameSize) &&
-        (M4VIDEOEDITING_kCIF != pParams->OutputVideoFrameSize)) {
-        M4OSA_TRACE1_0("M4PTO3GPP_Open():\
-             returning ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
-        return ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
-    }
-
-    /**
-     *  Check state automaton */
-    if (M4PTO3GPP_kState_CREATED != pC->m_State) {
-        M4OSA_TRACE1_1("M4PTO3GPP_Open(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
-        return M4ERR_STATE;
-    }
-
-    /**
-     * Copy the M4PTO3GPP_Params structure */
-    memcpy((void *)(&pC->m_Params),
-                (void *)pParams, sizeof(M4PTO3GPP_Params));
-    M4OSA_TRACE1_1("M4PTO3GPP_Open: outputVideoBitrate = %d", pC->m_Params.OutputVideoBitrate);
-
-    /***********************************/
-    /* Open input file with the reader */
-    /***********************************/
-    if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile) {
-        /**
-         * Get the reader interface according to the input audio file type */
-        switch(pC->m_Params.AudioFileFormat)
-        {
-#ifdef M4VSS_SUPPORT_READER_AMR
-        case M4VIDEOEDITING_kFileType_AMR:
-        err = M4READER_AMR_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
-                &pC->m_pReaderDataInt);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_AMR_getInterfaces returns 0x%x", err);
-            return err;
-        }
-            break;
-#endif
-
-#ifdef AAC_SUPPORTED
-        case M4VIDEOEDITING_kFileType_3GPP:
-            err = M4READER_3GP_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
-                    &pC->m_pReaderDataInt);
-            if (M4NO_ERROR != err)
-            {
-                M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_3GP_getInterfaces returns 0x%x", err);
-                return err;
-            }
-            break;
-#endif
-
-        default:
-            return ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE;
-        }
-
-        /**
-         *  Initializes the reader shell */
-        err = pC->m_pReaderGlobInt->m_pFctCreate(&pC->m_pAudioReaderContext);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctCreate returns 0x%x", err);
-            return err;
-        }
-
-        pC->m_pReaderDataInt->m_readerContext = pC->m_pAudioReaderContext;
-        /**< Link the reader interface to the reader context */
-
-        /**
-         *  Set the reader shell file access functions */
-        err = pC->m_pReaderGlobInt->m_pFctSetOption(pC->m_pAudioReaderContext,
-            M4READER_kOptionID_SetOsaFileReaderFctsPtr,  (M4OSA_DataOption)pC->pOsalFileRead);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctSetOption returns 0x%x", err);
-            return err;
-        }
-
-        /**
-         *  Open the input audio file */
-        err = pC->m_pReaderGlobInt->m_pFctOpen(pC->m_pAudioReaderContext,
-            pC->m_Params.pInputAudioTrackFile);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctOpen returns 0x%x", err);
-            pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
-            pC->m_pAudioReaderContext = M4OSA_NULL;
-            return err;
-        }
-
-        /**
-         *  Get the audio streams from the input file */
-        err = M4NO_ERROR;
-        while (M4NO_ERROR == err)
-        {
-            err = pC->m_pReaderGlobInt->m_pFctGetNextStream(pC->m_pAudioReaderContext,
-                &mediaFamily, &pStreamHandler);
-
-            if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE)) ||
-                   (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
-            {
-                err = M4NO_ERROR;
-                continue;
-            }
-
-            if (M4NO_ERROR == err) /**< One stream found */
-            {
-                /**< Found an audio stream */
-                if ((M4READER_kMediaFamilyAudio == mediaFamily)
-                    && (M4OSA_NULL == pC->m_pReaderAudioStream))
-                {
-                    pC->m_pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
-                    /**< Keep pointer to the audio stream */
-                    M4OSA_TRACE3_0("M4PTO3GPP_Open(): Found an audio stream in input");
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-
-                    /**
-                     *  Allocate audio AU used for read operations */
-                    pC->m_pReaderAudioAU = (M4_AccessUnit*)M4OSA_32bitAlignedMalloc(sizeof(M4_AccessUnit),
-                        M4PTO3GPP,(M4OSA_Char *)"pReaderAudioAU");
-                    if (M4OSA_NULL == pC->m_pReaderAudioAU)
-                    {
-                        M4OSA_TRACE1_0("M4PTO3GPP_Open(): unable to allocate pReaderAudioAU, \
-                                       returning M4ERR_ALLOC");
-                        return M4ERR_ALLOC;
-                    }
-
-                    /**
-                     *  Initializes an access Unit */
-                    err = pC->m_pReaderGlobInt->m_pFctFillAuStruct(pC->m_pAudioReaderContext,
-                            pStreamHandler, pC->m_pReaderAudioAU);
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4PTO3GPP_Open():\
-                         pReaderGlobInt->m_pFctFillAuStruct(audio)returns 0x%x", err);
-                        return err;
-                    }
-                }
-                else
-                {
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-            }
-            else if (M4WAR_NO_MORE_STREAM != err) /**< Unexpected error code */
-            {
-                M4OSA_TRACE1_1("M4PTO3GPP_Open():\
-                     pReaderGlobInt->m_pFctGetNextStream returns 0x%x",
-                    err);
-                return err;
-            }
-        } /* while*/
-    } /*if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile)*/
-
-    pC->m_VideoState = M4PTO3GPP_kStreamState_STARTED;
-
-    /**
-     * Init the audio stream */
-    if (M4OSA_NULL != pC->m_pReaderAudioStream)
-    {
-        pC->m_AudioState = M4PTO3GPP_kStreamState_STARTED;
-        err = pC->m_pReaderGlobInt->m_pFctReset(pC->m_pAudioReaderContext,
-            (M4_StreamHandler*)pC->m_pReaderAudioStream);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderDataInt->m_pFctReset(audio returns 0x%x",
-                 err);
-            return err;
-        }
-    }
-
-    /**
-     *  Update state automaton */
-    pC->m_State = M4PTO3GPP_kState_OPENED;
-
-    /**
-     * Get the max File size */
-    switch(pC->m_Params.OutputFileMaxSize)
-    {
-    case M4PTO3GPP_k50_KB:  pC->m_MaxFileSize = 50000;  break;
-    case M4PTO3GPP_k75_KB:  pC->m_MaxFileSize = 75000;  break;
-    case M4PTO3GPP_k100_KB: pC->m_MaxFileSize = 100000; break;
-    case M4PTO3GPP_k150_KB: pC->m_MaxFileSize = 150000; break;
-    case M4PTO3GPP_k200_KB: pC->m_MaxFileSize = 200000; break;
-    case M4PTO3GPP_k300_KB: pC->m_MaxFileSize = 300000; break;
-    case M4PTO3GPP_k400_KB: pC->m_MaxFileSize = 400000; break;
-    case M4PTO3GPP_k500_KB: pC->m_MaxFileSize = 500000; break;
-    case M4PTO3GPP_kUNLIMITED:
-    default:                                            break;
-    }
-
-    M4OSA_TRACE3_0("M4PTO3GPP_Open(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
- * @brief   Perform one step of trancoding.
- * @note
- * @param   pContext            (IN) M4PTO3GPP context
- * @return  M4NO_ERROR          No error
- * @return  M4ERR_PARAMETER     pContext is M4OSA_NULL
- * @return  M4ERR_STATE:    M4PTO3GPP is not in an appropriate state for this function
- *                           to be called
- * @return  M4PTO3GPP_WAR_END_OF_PROCESSING Encoding completed
- ******************************************************************************
-*/
-/*********************************************************/
-M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext)
-/*********************************************************/
-{
-    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 l_uiAudioStepCount = 0;
-    M4OSA_Int32  JumpToTime = 0;
-    M4OSA_Time  mtIncCts;
-
-    /**
-     *  Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER,
-                "M4PTO3GPP_Step: pContext is M4OSA_NULL");
-
-    /**
-     *  Check state automaton */
-    if ( !((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)) )
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Step(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
-        return M4ERR_STATE;
-    }
-
-    /******************************************************************/
-    /**
-     *  In case this is the first step, we prepare the decoder, the encoder and the writer */
-    if (M4PTO3GPP_kState_OPENED == pC->m_State)
-    {
-        M4OSA_TRACE2_0("M4PTO3GPP_Step(): This is the first step, \
-                       calling M4PTO3GPP_Ready4Processing");
-
-        /**
-         *  Prepare the reader, the decoder, the encoder, the writer... */
-        err = M4PTO3GPP_Ready4Processing(pC);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_Ready4Processing() returns 0x%x", err);
-            return err;
-        }
-
-        /**
-         *  Update state automaton */
-        pC->m_State = M4PTO3GPP_kState_READY;
-
-        M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (a)");
-        return M4NO_ERROR; /**< we only do that in the first step, \
-                           first REAL step will be the next one */
-    }
-
-
-    /*
-     * Check if we reached the targeted file size.
-     * We do that before the encoding, because the core encoder has to know if this is
-     * the last frame to encode */
-    err = pC->m_pWriterGlobInt->pFctGetOption(pC->m_p3gpWriterContext,
-        M4WRITER_kFileSizeAudioEstimated, (M4OSA_DataOption) &pC->m_CurrentFileSize);
-    if ((0 != pC->m_MaxFileSize) &&
-        /**< Add a marge to the file size in order to never exceed the max file size */
-       ((pC->m_CurrentFileSize + M4PTO3GPP_MARGE_OF_FILE_SIZE) >= pC->m_MaxFileSize))
-    {
-        pC->m_IsLastPicture = M4OSA_TRUE;
-    }
-
-    /******************************************************************
-    *  At that point we are in M4PTO3GPP_kState_READY state
-    *  We perform one step of video encoding
-    ******************************************************************/
-
-    /************* VIDEO ENCODING ***************/
-    if (M4PTO3GPP_kStreamState_STARTED == pC->m_VideoState) /**<If the video encoding is going on*/
-    {   /**
-         * Call the encoder  */
-        pC->m_NbCurrentFrame++;
-
-        /* Check if it is the last frame the to encode */
-        if((pC->m_Params.NbVideoFrames > 0) \
-            && (pC->m_NbCurrentFrame >= pC->m_Params.NbVideoFrames))
-        {
-            pC->m_IsLastPicture = M4OSA_TRUE;
-        }
-
-        M4OSA_TRACE2_2("M4PTO3GPP_Step(): Calling pEncoderInt->pFctEncode with videoCts = %.2f\
-                       nb = %lu", pC->m_mtCts, pC->m_NbCurrentFrame);
-
-        err = pC->m_pEncoderInt->pFctEncode(pC->m_pMp4EncoderContext, M4OSA_NULL,
-            /**< The input plane is null because the input Picture will be obtained by the\
-            VPP filter from the context */
-                                        pC->m_mtCts,
-                                        (pC->m_IsLastPicture ?
-                                        M4ENCODER_kLastFrame : M4ENCODER_kNormalFrame) );
-        /**< Last param set to M4OSA_TRUE signals that this is the last frame to be encoded,\
-        M4OSA_FALSE else */
-
-        M4OSA_TRACE3_2("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns 0x%x, vidFormat =0x%x",
-            err, pC->m_Params.OutputVideoFormat);
-        if((M4NO_ERROR == err) && (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
-        {
-            /* Check if last frame.*
-            *                  */
-            if(M4OSA_TRUE == pC->m_IsLastPicture)
-            {
-                M4OSA_TRACE3_0("M4PTO3GPP_Step(): Last picture");
-                pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
-            }
-
-        }
-
-        if (M4WAR_NO_MORE_AU == err) /**< The video encoding is finished */
-        {
-            M4OSA_TRACE3_0("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns M4WAR_NO_MORE_AU");
-            pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
-        }
-        else if (M4NO_ERROR != err)     /**< Unexpected error code */
-        {
-            if( (((M4OSA_UInt32)M4WAR_WRITER_STOP_REQ) == err) ||
-                    (((M4OSA_UInt32)M4ERR_ALLOC) == err) )
-            {
-                M4OSA_TRACE1_0("M4PTO3GPP_Step: returning ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR");
-                return ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR;
-            }
-            else
-            {
-                M4OSA_TRACE1_1("M4PTO3GPP_Step(): pEncoderInt->pFctEncode(last) (a) returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-    } /**< End of video encoding */
-
-
-    /****** AUDIO TRANSCODING (read + null encoding + write) ******/
-    if (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState)
-    {
-        while ( (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState) &&
-                (pC->m_mtAudioCts < pC->m_mtNextCts))
-
-        {
-            l_uiAudioStepCount++;
-            if (M4OSA_FALSE == pC->m_bAudioPaddingSilence)
-            {
-                /**< Read the next audio AU in the input Audio file */
-                err = pC->m_pReaderDataInt->m_pFctGetNextAu(pC->m_pAudioReaderContext,
-                    (M4_StreamHandler*)pC->m_pReaderAudioStream, pC->m_pReaderAudioAU);
-                pC->m_mtAudioCts = pC->m_pReaderAudioAU->m_CTS + pC->m_AudioOffSet;
-
-                if (M4WAR_NO_MORE_AU == err)    /* The audio transcoding is finished */
-                {
-                    M4OSA_TRACE2_0("M4PTO3GPP_Step():\
-                                  pReaderDataInt->m_pFctGetNextAu(audio) returns \
-                                    M4WAR_NO_MORE_AU");
-                    switch(pC->m_Params.AudioPaddingMode)
-                    {
-                        case M4PTO3GPP_kAudioPaddingMode_None:
-
-                            pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
-                            break;
-
-                        case M4PTO3GPP_kAudioPaddingMode_Silence:
-
-                            if (M4DA_StreamTypeAudioAmrNarrowBand
-                                != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
-                                /**< Do nothing if the input audio file format is not AMR */
-                            {
-                                pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
-                            }
-                            else
-                            {
-                                pC->m_bAudioPaddingSilence = M4OSA_TRUE;
-                            }
-                            break;
-
-                        case M4PTO3GPP_kAudioPaddingMode_Loop:
-
-                            /**< Jump to the beginning of the audio file */
-                            err = pC->m_pReaderGlobInt->m_pFctJump(pC->m_pAudioReaderContext,
-                                (M4_StreamHandler*)pC->m_pReaderAudioStream, &JumpToTime);
-
-                            if (M4NO_ERROR != err)
-                            {
-                                M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
-                                              pReaderDataInt->m_pFctReset(audio returns 0x%x",
-                                               err);
-                                return err;
-                            }
-
-                            if (M4DA_StreamTypeAudioAmrNarrowBand
-                                == pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
-                            {
-                                pC->m_mtAudioCts += 20; /*< SEMC bug fixed at Lund */
-                                pC->m_AudioOffSet = pC->m_mtAudioCts;
-
-                                /**
-                                 * 'BZZZ' bug fix:
-                                 * add a silence frame */
-                                mtIncCts = (M4OSA_Time)((pC->m_mtAudioCts) *
-                                    (pC->m_pWriterAudioStream->timeScale / 1000.0));
-                                err = M4PTO3GPP_writeAmrSilence122Frame(pC->m_pWriterDataInt,
-                                    pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
-
-                                if (M4NO_ERROR != err)
-                                {
-                                    M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
-                                                   M4PTO3GPP_AddAmrSilenceSid returns 0x%x", err);
-                                    return err;
-                                }/**< Add => no audio cts increment...*/
-                            }
-                            else
-                            {
-                                pC->m_AudioOffSet = pC->m_mtAudioCts + pC->m_DeltaAudioCts;
-                            }
-                            break;
-                    } /* end of: switch */
-                }
-                else if (M4NO_ERROR != err)
-                {
-                    M4OSA_TRACE1_1("M4PTO3GPP_Step(): pReaderDataInt->m_pFctGetNextAu(Audio)\
-                                   returns 0x%x", err);
-                    return err;
-                }
-                else
-                {
-                    /**
-                     * Save the delta Cts (AAC only) */
-                    pC->m_DeltaAudioCts = pC->m_pReaderAudioAU->m_CTS - pC->m_PrevAudioCts;
-                    pC->m_PrevAudioCts  = pC->m_pReaderAudioAU->m_CTS;
-
-                    /**
-                     *  Prepare the writer AU */
-                    err = pC->m_pWriterDataInt->pStartAU(pC->m_p3gpWriterContext, 1,
-                        &pC->m_WriterAudioAU);
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pStartAU(Audio)\
-                                       returns 0x%x", err);
-                        return err;
-                    }
-
-                    /**
-                     *  Copy audio data from reader AU to writer AU */
-                    M4OSA_TRACE2_1("M4PTO3GPP_Step(): Copying audio AU: size=%d",
-                        pC->m_pReaderAudioAU->m_size);
-                    memcpy((void *)pC->m_WriterAudioAU.dataAddress,
-                        (void *)pC->m_pReaderAudioAU->m_dataAddress,
-                        pC->m_pReaderAudioAU->m_size);
-                    pC->m_WriterAudioAU.size = pC->m_pReaderAudioAU->m_size;
-
-                    /**
-                     *  Convert CTS unit from milliseconds to timescale */
-                    if (M4DA_StreamTypeAudioAmrNarrowBand
-                        != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
-                    {
-                        pC->m_WriterAudioAU.CTS  = (M4OSA_Time)
-                            ((pC->m_AudioOffSet + pC->m_pReaderAudioAU->m_CTS)
-                            * pC->m_pWriterAudioStream->timeScale / 1000.0);
-                    }
-                    else
-                    {
-                        pC->m_WriterAudioAU.CTS = (M4OSA_Time)(pC->m_mtAudioCts *
-                            (pC->m_pWriterAudioStream->timeScale / 1000.0));
-                    }
-                    pC->m_WriterAudioAU.nbFrag = 0;
-                    M4OSA_TRACE2_1("M4PTO3GPP_Step(): audio AU: CTS=%d ms", pC->m_mtAudioCts
-                        /*pC->m_pReaderAudioAU->m_CTS*/);
-
-                    /**
-                     *  Write it to the output file */
-                    err = pC->m_pWriterDataInt->pProcessAU(pC->m_p3gpWriterContext, 1,
-                        &pC->m_WriterAudioAU);
-
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pProcessAU(Audio)\
-                                       returns 0x%x", err);
-                        return err;
-                    }
-                }
-            }
-            else /**< M4OSA_TRUE == pC->m_bAudioPaddingSilence */
-            {
-                if (M4DA_StreamTypeAudioAmrNarrowBand ==
-                    pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
-                {
-                    /**
-                     * Fill in audio au with silence */
-                    pC->m_mtAudioCts += 20;
-
-                    /**
-                     * Padd with silence */
-                    mtIncCts = (M4OSA_Time)(pC->m_mtAudioCts
-                        * (pC->m_pWriterAudioStream->timeScale / 1000.0));
-                    err = M4PTO3GPP_writeAmrSilence048Frame(pC->m_pWriterDataInt,
-                        pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
-
-                    if (M4NO_ERROR != err)
-                    {
-                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_AddAmrSilenceSid returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-                else /**< Do nothing if the input audio file format is not AMR */
-                {
-                    pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
-                }
-
-            }
-        } /**< while */
-    } /**< End of audio encoding */
-
-    pC->m_mtCts = pC->m_mtNextCts;
-
-    /**
-     *  The transcoding is finished when no stream is being encoded anymore */
-    if (M4PTO3GPP_kStreamState_FINISHED == pC->m_VideoState)
-    {
-        pC->m_State = M4PTO3GPP_kState_FINISHED;
-        M4OSA_TRACE2_0("M4PTO3GPP_Step(): transcoding finished, returning M4WAR_NO_MORE_AU");
-        return M4PTO3GPP_WAR_END_OF_PROCESSING;
-    }
-
-    M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (b)");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
- * @brief   Finish the M4PTO3GPP transcoding.
- * @note    The output 3GPP file is ready to be played after this call
- * @param   pContext            (IN) M4PTO3GPP context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- * @return  M4ERR_STATE:    M4PTO3GPP is not in an appropriate state for this function to be called
- ******************************************************************************
-*/
-/*********************************************************/
-M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext)
-/*********************************************************/
-{
-    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
-    M4OSA_ERR    osaErr = M4NO_ERROR;
-    M4OSA_UInt32 lastCTS;
-    M4ENCODER_Header* encHeader;
-    M4SYS_StreamIDmemAddr streamHeader;
-
-    M4OSA_TRACE3_1("M4PTO3GPP_Close called with pContext=0x%x", pContext);
-
-    /**
-     *  Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER, "M4PTO3GPP_Close:\
-                                                             pContext is M4OSA_NULL");
-
-    /* Check state automaton */
-    if ((pC->m_State != M4PTO3GPP_kState_OPENED) &&
-        (pC->m_State != M4PTO3GPP_kState_READY) &&
-        (pC->m_State != M4PTO3GPP_kState_FINISHED))
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Close(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
-        return M4ERR_STATE;
-    }
-
-    /*************************************/
-    /******** Finish the encoding ********/
-    /*************************************/
-    if (M4PTO3GPP_kState_READY == pC->m_State)
-    {
-        pC->m_State = M4PTO3GPP_kState_FINISHED;
-    }
-
-    if (M4PTO3GPP_kEncoderRunning == pC->m_eEncoderState)
-    {
-        if (pC->m_pEncoderInt->pFctStop != M4OSA_NULL)
-        {
-            osaErr = pC->m_pEncoderInt->pFctStop(pC->m_pMp4EncoderContext);
-            if (M4NO_ERROR != osaErr)
-            {
-                M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctStop returns 0x%x", osaErr);
-                /* Well... how the heck do you handle a failed cleanup? */
-            }
-        }
-
-        pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
-    }
-
-    /* Has the encoder actually been opened? Don't close it if that's not the case. */
-    if (M4PTO3GPP_kEncoderStopped == pC->m_eEncoderState)
-    {
-        osaErr = pC->m_pEncoderInt->pFctClose(pC->m_pMp4EncoderContext);
-        if (M4NO_ERROR != osaErr)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctClose returns 0x%x", osaErr);
-            /* Well... how the heck do you handle a failed cleanup? */
-        }
-
-        pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
-    }
-
-    /*******************************/
-    /******** Close 3GP out ********/
-    /*******************************/
-
-    if (M4OSA_NULL != pC->m_p3gpWriterContext)  /* happens in state _SET */
-    {
-        /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
-        closing it. */
-        if ((M4VIDEOEDITING_kMPEG4 == pC->m_Params.OutputVideoFormat)
-            || (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
-        {
-            osaErr = pC->m_pEncoderInt->pFctGetOption(pC->m_pMp4EncoderContext,
-                M4ENCODER_kOptionID_EncoderHeader,
-                                                            (M4OSA_DataOption)&encHeader);
-            if ( (M4NO_ERROR != osaErr) || (M4OSA_NULL == encHeader->pBuf) )
-            {
-                M4OSA_TRACE1_1("M4PTO3GPP_close: failed to get the encoder header (err 0x%x)",
-                    osaErr);
-                /**< no return here, we still have stuff to deallocate after close, even if \
-                it fails. */
-            }
-            else
-            {
-                /* set this header in the writer */
-                streamHeader.streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
-                streamHeader.size = encHeader->Size;
-                streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
-                osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
-                    M4WRITER_kDSI, &streamHeader);
-                if (M4NO_ERROR != osaErr)
-                {
-                    M4OSA_TRACE1_1("M4PTO3GPP_close: failed to set the DSI in the writer \
-                                (err 0x%x)   ", osaErr);
-                }
-            }
-        }
-
-        /* Update last Video CTS */
-        lastCTS = (M4OSA_UInt32)pC->m_mtCts;
-
-        osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
-        if (M4NO_ERROR != osaErr)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Close: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
-                osaErr);
-        }
-
-        /* Write and close the 3GP output file */
-        osaErr = pC->m_pWriterGlobInt->pFctCloseWrite(pC->m_p3gpWriterContext);
-        if (M4NO_ERROR != osaErr)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Close: pWriterGlobInt->pFctCloseWrite returns 0x%x", osaErr);
-            /**< don't return yet, we have to close other things */
-        }
-        pC->m_p3gpWriterContext = M4OSA_NULL;
-    }
-
-    /**
-     * State transition */
-    pC->m_State = M4PTO3GPP_kState_CLOSED;
-
-    M4OSA_TRACE3_1("M4PTO3GPP_Close(): returning 0x%x", osaErr);
-    return osaErr;
-}
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
- * @brief   Free all resources used by the M4PTO3GPP.
- * @note    The context is no more valid after this call
- * @param   pContext            (IN) M4PTO3GPP context
- * @return  M4NO_ERROR:         No error
- * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
- ******************************************************************************
-*/
-/*********************************************************/
-M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext)
-/*********************************************************/
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
-
-    M4OSA_TRACE3_1("M4PTO3GPP_CleanUp called with pContext=0x%x", pContext);
-
-    /**
-     *  Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext),M4ERR_PARAMETER, "M4PTO3GPP_CleanUp: pContext \
-                                                            is M4OSA_NULL");
-
-    /**
-     *  First call Close, if needed, to clean the video encoder */
-
-    if ((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)
-        || (M4PTO3GPP_kState_FINISHED == pC->m_State))
-    {
-        err = M4PTO3GPP_Close(pContext);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: M4PTO3GPP_Close returns 0x%x", err);
-            /**< don't return, we have to free other components */
-        }
-    }
-
-    /**
-     *  Free Audio reader stuff, if needed */
-
-    if (M4OSA_NULL != pC->m_pAudioReaderContext) /**< may be M4OSA_NULL if M4PTO3GPP_Open was not\
-                                                 called */
-    {
-
-        err = pC->m_pReaderGlobInt->m_pFctClose(pC->m_pAudioReaderContext);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctClose returns 0x%x", err);
-            /**< don't return, we have to free other components */
-        }
-        err = pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
-        pC->m_pAudioReaderContext = M4OSA_NULL;
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctDestroy returns 0x%x", err);
-            /**< don't return, we have to free other components */
-        }
-    }
-
-    if (M4OSA_NULL != pC->m_pReaderAudioAU)
-    {
-        free(pC->m_pReaderAudioAU);
-        pC->m_pReaderAudioAU = M4OSA_NULL;
-    }
-
-    /**
-     *  Free video encoder stuff, if needed */
-    if (M4OSA_NULL != pC->m_pMp4EncoderContext)
-    {
-        err = pC->m_pEncoderInt->pFctCleanup(pC->m_pMp4EncoderContext);
-        pC->m_pMp4EncoderContext = M4OSA_NULL;
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pEncoderInt->pFctDestroy returns 0x%x", err);
-            /**< don't return, we have to free other components */
-        }
-    }
-
-    if (M4OSA_NULL != pC->m_pWriterVideoStream)
-    {
-        free(pC->m_pWriterVideoStream);
-        pC->m_pWriterVideoStream = M4OSA_NULL;
-    }
-    if (M4OSA_NULL != pC->m_pWriterAudioStream)
-    {
-        free(pC->m_pWriterAudioStream);
-        pC->m_pWriterAudioStream = M4OSA_NULL;
-    }
-    if (M4OSA_NULL != pC->m_pWriterVideoStreamInfo)
-    {
-        free(pC->m_pWriterVideoStreamInfo);
-        pC->m_pWriterVideoStreamInfo = M4OSA_NULL;
-    }
-    if (M4OSA_NULL != pC->m_pWriterAudioStreamInfo)
-    {
-        free(pC->m_pWriterAudioStreamInfo);
-        pC->m_pWriterAudioStreamInfo = M4OSA_NULL;
-    }
-
-
-    /**
-     *  Free the shells interfaces */
-    if (M4OSA_NULL != pC->m_pReaderGlobInt)
-    {
-        free(pC->m_pReaderGlobInt);
-        pC->m_pReaderGlobInt = M4OSA_NULL;
-    }
-    if (M4OSA_NULL != pC->m_pReaderDataInt)
-    {
-        free(pC->m_pReaderDataInt);
-        pC->m_pReaderDataInt = M4OSA_NULL;
-    }
-
-    if(M4OSA_NULL != pC->m_pEncoderInt)
-    {
-        free(pC->m_pEncoderInt);
-        pC->m_pEncoderInt = M4OSA_NULL;
-    }
-    if(M4OSA_NULL != pC->m_pWriterGlobInt)
-    {
-        free(pC->m_pWriterGlobInt);
-        pC->m_pWriterGlobInt = M4OSA_NULL;
-    }
-    if(M4OSA_NULL != pC->m_pWriterDataInt)
-    {
-        free(pC->m_pWriterDataInt);
-        pC->m_pWriterDataInt = M4OSA_NULL;
-    }
-    /**< Do not free pC->pOsaMemoryPtrFct and pC->pOsaMemoryPtrFct, because it's owned by the \
-    application */
-
-    /**
-     *  Free the context itself */
-    free(pC);
-    pC = M4OSA_NULL;
-
-    M4OSA_TRACE3_0("M4PTO3GPP_CleanUp(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/********************* INTERNAL FUNCTIONS *********************/
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
- * @brief   Prepare all resources and interfaces for the transcoding.
- * @note    It is called by the first M4OSA_Step() call
- * @param   pC          (IN) M4PTO3GPP private context
- * @return  M4NO_ERROR: No error
- * @return  Any error returned by an underlaying module
- ******************************************************************************
-*/
-/******************************************************/
-M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC)
-/******************************************************/
-{
-    M4OSA_ERR               err = M4NO_ERROR;
-    M4WRITER_OutputFileType outputFileType;
-    M4OSA_UInt32            uiVersion;
-    M4ENCODER_Format        encFormat;
-    M4ENCODER_AdvancedParams   EncParams;    /**< Encoder advanced parameters */
-    M4SYS_StreamIDValue     optionValue;
-
-    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing called with pC=0x%x", pC);
-
-    /******************************/
-    /******************************/
-
-    /********************************************/
-    /********                            ********/
-    /******** Video Encoder Parames init ********/
-    /********                            ********/
-    /********************************************/
-
-    /**
-     *  Get the correct encoder interface */
-    switch(pC->m_Params.OutputVideoFormat)
-    {
-        case M4VIDEOEDITING_kMPEG4:
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-                err = VideoEditorVideoEncoder_getInterface_MPEG4(&encFormat, &pC->m_pEncoderInt,
-                    M4ENCODER_OPEN_ADVANCED);
-#else /* software MPEG4 encoder not available! */
-                M4OSA_TRACE1_0("No MPEG4 encoder available! Did you forget to register one?");
-                err = M4ERR_STATE;
-#endif /* software MPEG4 encoder available? */
-            break;
-        case M4VIDEOEDITING_kH263:
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-                err = VideoEditorVideoEncoder_getInterface_H263(&encFormat, &pC->m_pEncoderInt,
-                    M4ENCODER_OPEN_ADVANCED);
-#else /* software H263 encoder not available! */
-                M4OSA_TRACE1_0("No H263 encoder available! Did you forget to register one?");
-                err = M4ERR_STATE;
-#endif /* software H263 encoder available? */
-            break;
-        case M4VIDEOEDITING_kH264:
-#ifdef M4VSS_SUPPORT_ENCODER_AVC
-                err = VideoEditorVideoEncoder_getInterface_H264(&encFormat, &pC->m_pEncoderInt,
-                    M4ENCODER_OPEN_ADVANCED);
-#else /* software H264 encoder not available! */
-                M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing: No H264 encoder available!\
-                               Did you forget to register one?");
-                err = M4ERR_STATE;
-#endif /* software H264 encoder available? */
-            break;
-        default:
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
-                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
-                           pC->m_Params.OutputVideoFormat);
-            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-    }
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("switch(pC->m_Params.OutputVideoFormat): getInterfaces returns 0x%x", err);
-        return err;
-    }
-
-    /**
-     *  Fill encoder parameters according to M4PTO3GPP settings */
-
-    /**
-     * Video frame size */
-    switch(pC->m_Params.OutputVideoFrameSize)
-    {
-        case M4VIDEOEDITING_kSQCIF :
-            EncParams.FrameHeight = M4ENCODER_SQCIF_Height;
-            EncParams.FrameWidth  = M4ENCODER_SQCIF_Width;
-            break;
-        case M4VIDEOEDITING_kQQVGA :
-            EncParams.FrameHeight = M4ENCODER_QQVGA_Height;
-            EncParams.FrameWidth  = M4ENCODER_QQVGA_Width;
-            break;
-        case M4VIDEOEDITING_kQCIF :
-            EncParams.FrameHeight = M4ENCODER_QCIF_Height;
-            EncParams.FrameWidth  = M4ENCODER_QCIF_Width;
-            break;
-        case M4VIDEOEDITING_kQVGA :
-            EncParams.FrameHeight = M4ENCODER_QVGA_Height;
-            EncParams.FrameWidth  = M4ENCODER_QVGA_Width;
-            break;
-        case M4VIDEOEDITING_kCIF :
-            EncParams.FrameHeight = M4ENCODER_CIF_Height;
-            EncParams.FrameWidth  = M4ENCODER_CIF_Width;
-            break;
-        case M4VIDEOEDITING_kVGA :
-            EncParams.FrameHeight = M4ENCODER_VGA_Height;
-            EncParams.FrameWidth  = M4ENCODER_VGA_Width;
-            break;
-/* +PR LV5807 */
-        case M4VIDEOEDITING_kWVGA :
-            EncParams.FrameHeight = M4ENCODER_WVGA_Height;
-            EncParams.FrameWidth  = M4ENCODER_WVGA_Width;
-            break;
-        case M4VIDEOEDITING_kNTSC:
-            EncParams.FrameHeight = M4ENCODER_NTSC_Height;
-            EncParams.FrameWidth  = M4ENCODER_NTSC_Width;
-            break;
-/* -PR LV5807 */
-/* +CR Google */
-        case M4VIDEOEDITING_k640_360:
-            EncParams.FrameHeight = M4ENCODER_640_360_Height;
-            EncParams.FrameWidth  = M4ENCODER_640_360_Width;
-            break;
-
-        case M4VIDEOEDITING_k854_480:
-            EncParams.FrameHeight = M4ENCODER_854_480_Height;
-            EncParams.FrameWidth  = M4ENCODER_854_480_Width;
-            break;
-
-        case M4VIDEOEDITING_k1280_720:
-            EncParams.FrameHeight = M4ENCODER_1280_720_Height;
-            EncParams.FrameWidth  = M4ENCODER_1280_720_Width;
-            break;
-
-        case M4VIDEOEDITING_k1080_720:
-            EncParams.FrameHeight = M4ENCODER_1080_720_Height;
-            EncParams.FrameWidth  = M4ENCODER_1080_720_Width;
-            break;
-
-        case M4VIDEOEDITING_k960_720:
-            EncParams.FrameHeight = M4ENCODER_960_720_Height;
-            EncParams.FrameWidth  = M4ENCODER_960_720_Width;
-            break;
-
-        case M4VIDEOEDITING_k1920_1080:
-            EncParams.FrameHeight = M4ENCODER_1920_1080_Height;
-            EncParams.FrameWidth  = M4ENCODER_1920_1080_Width;
-            break;
-/* -CR Google */
-        default :
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
-                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
-                           pC->m_Params.OutputVideoFrameSize);
-            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
-    }
-
-    EncParams.InputFormat = M4ENCODER_kIYUV420;
-
-    /**
-     * Video bitrate */
-    switch(pC->m_Params.OutputVideoBitrate)
-    {
-        case M4VIDEOEDITING_k16_KBPS:
-        case M4VIDEOEDITING_k24_KBPS:
-        case M4VIDEOEDITING_k32_KBPS:
-        case M4VIDEOEDITING_k48_KBPS:
-        case M4VIDEOEDITING_k64_KBPS:
-        case M4VIDEOEDITING_k96_KBPS:
-        case M4VIDEOEDITING_k128_KBPS:
-        case M4VIDEOEDITING_k192_KBPS:
-        case M4VIDEOEDITING_k256_KBPS:
-        case M4VIDEOEDITING_k288_KBPS:
-        case M4VIDEOEDITING_k384_KBPS:
-        case M4VIDEOEDITING_k512_KBPS:
-        case M4VIDEOEDITING_k800_KBPS:
-/*+ New Encoder bitrates */
-        case M4VIDEOEDITING_k2_MBPS:
-        case M4VIDEOEDITING_k5_MBPS:
-        case M4VIDEOEDITING_k8_MBPS:
-/*- New Encoder bitrates */
-            EncParams.Bitrate = pC->m_Params.OutputVideoBitrate;
-            break;
-
-        case M4VIDEOEDITING_kVARIABLE_KBPS:
-/*+ New Encoder bitrates */
-            EncParams.Bitrate = M4VIDEOEDITING_k8_MBPS;
-/*- New Encoder bitrates */
-            break;
-
-        default :
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
-                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
-                           pC->m_Params.OutputVideoBitrate);
-            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
-    }
-
-    /**
-     * Video format */
-    switch(pC->m_Params.OutputVideoFormat)
-    {
-        case M4VIDEOEDITING_kMPEG4 :
-            EncParams.Format    = M4ENCODER_kMPEG4;
-            break;
-        case M4VIDEOEDITING_kH263 :
-            EncParams.Format    = M4ENCODER_kH263;
-            break;
-        case M4VIDEOEDITING_kH264:
-            EncParams.Format    = M4ENCODER_kH264;
-            break;
-        default :
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
-                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
-                           pC->m_Params.OutputVideoFormat);
-            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-    }
-
-    /**
-     * Video frame rate (set it to max = 30 fps) */
-    EncParams.uiTimeScale = 30;
-    EncParams.uiRateFactor = 1;
-
-    EncParams.FrameRate = M4ENCODER_k30_FPS;
-
-
-    /******************************/
-    /******** 3GP out init ********/
-    /******************************/
-
-    /* Get the 3GPP writer interface */
-    err = M4WRITER_3GP_getInterfaces(&outputFileType, &pC->m_pWriterGlobInt, &pC->m_pWriterDataInt);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4WRITER_3GP_getInterfaces: M4WRITER_3GP_getInterfaces returns 0x%x", err);
-        return err;
-    }
-
-    /* Init the 3GPP writer */
-    err = pC->m_pWriterGlobInt->pFctOpen(&pC->m_p3gpWriterContext, pC->m_Params.pOutput3gppFile,
-        pC->pOsalFileWrite, pC->m_Params.pTemporaryFile, pC->pOsalFileRead);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctOpen returns 0x%x", err);
-        return err;
-    }
-
-    /**
-     *  Link to the writer context in the writer interface */
-    pC->m_pWriterDataInt->pWriterContext = pC->m_p3gpWriterContext;
-
-    /**
-     *  Set the product description string in the written file */
-    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedString,
-        (M4OSA_DataOption)M4PTO3GPP_SIGNATURE);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
-                       pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x", err);
-        return err;
-    }
-
-    /**
-     *  Set the product version in the written file */
-    uiVersion = M4VIDEOEDITING_VERSION_MAJOR*100 + M4VIDEOEDITING_VERSION_MINOR*10
-        + M4VIDEOEDITING_VERSION_REVISION;
-    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedVersion,
-        (M4OSA_DataOption)&uiVersion);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
-                       pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x", err);
-        return err;
-    }
-
-    /**
-     *  Allocate and fill the video stream structures for the writer */
-    pC->m_pWriterVideoStream =
-        (M4SYS_StreamDescription*)M4OSA_32bitAlignedMalloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
-        (M4OSA_Char *)"pWriterVideoStream");
-    if (M4OSA_NULL == pC->m_pWriterVideoStream)
-    {
-        M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStream, \
-                       returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-    pC->m_pWriterVideoStreamInfo =
-        (M4WRITER_StreamVideoInfos*)M4OSA_32bitAlignedMalloc(sizeof(M4WRITER_StreamVideoInfos), M4PTO3GPP,
-        (M4OSA_Char *)"pWriterVideoStreamInfo");
-    if (M4OSA_NULL == pC->m_pWriterVideoStreamInfo)
-    {
-        M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStreamInfo,\
-                       returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    /**
-     * Fill Video properties structure for the AddStream method */
-    pC->m_pWriterVideoStreamInfo->height        = EncParams.FrameHeight;
-    pC->m_pWriterVideoStreamInfo->width         = EncParams.FrameWidth;
-    pC->m_pWriterVideoStreamInfo->fps           = 0;        /**< Not used by the core writer */
-    pC->m_pWriterVideoStreamInfo->Header.pBuf   = M4OSA_NULL;
-    /** No header, will be set by setOption */
-    pC->m_pWriterVideoStreamInfo->Header.Size   = 0;
-
-    /**
-     *  Fill Video stream description structure for the AddStream method */
-    pC->m_pWriterVideoStream->streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
-
-    /**
-     * Video format */
-    switch(pC->m_Params.OutputVideoFormat)
-    {
-        case M4VIDEOEDITING_kMPEG4:
-            pC->m_pWriterVideoStream->streamType = M4SYS_kMPEG_4;   break;
-        case M4VIDEOEDITING_kH263:
-            pC->m_pWriterVideoStream->streamType = M4SYS_kH263;     break;
-        case M4VIDEOEDITING_kH264:
-            pC->m_pWriterVideoStream->streamType = M4SYS_kH264;     break;
-        default :
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
-                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
-                           pC->m_Params.OutputVideoFormat);
-            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
-    }
-
-    /**
-     * Video bitrate */
-    switch(pC->m_Params.OutputVideoBitrate)
-    {
-        case M4VIDEOEDITING_k16_KBPS:
-        case M4VIDEOEDITING_k24_KBPS:
-        case M4VIDEOEDITING_k32_KBPS:
-        case M4VIDEOEDITING_k48_KBPS:
-        case M4VIDEOEDITING_k64_KBPS:
-        case M4VIDEOEDITING_k96_KBPS:
-        case M4VIDEOEDITING_k128_KBPS:
-        case M4VIDEOEDITING_k192_KBPS:
-        case M4VIDEOEDITING_k256_KBPS:
-        case M4VIDEOEDITING_k288_KBPS:
-        case M4VIDEOEDITING_k384_KBPS:
-        case M4VIDEOEDITING_k512_KBPS:
-        case M4VIDEOEDITING_k800_KBPS:
-/*+ New Encoder bitrates */
-        case M4VIDEOEDITING_k2_MBPS:
-        case M4VIDEOEDITING_k5_MBPS:
-        case M4VIDEOEDITING_k8_MBPS:
-/*- New Encoder bitrates */
-            pC->m_pWriterVideoStream->averageBitrate = pC->m_Params.OutputVideoBitrate;
-            break;
-
-        case M4VIDEOEDITING_kVARIABLE_KBPS :
-            pC->m_pWriterVideoStream->averageBitrate = 0;
-            break;
-
-        default :
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
-                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
-                           pC->m_Params.OutputVideoBitrate);
-            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
-    }
-
-    pC->m_pWriterVideoStream->duration                  = 0;        /**< Duration is not known */
-    pC->m_pWriterVideoStream->timeScale                 = 0;    /**< Not used by the core writer */
-    pC->m_pWriterVideoStream->maxBitrate                = pC->m_pWriterVideoStream->averageBitrate;
-    pC->m_pWriterVideoStream->profileLevel              = 0;    /**< Not used by the core writer */
-    pC->m_pWriterVideoStream->decoderSpecificInfo       = (M4OSA_MemAddr32)
-                                                            (pC->m_pWriterVideoStreamInfo);
-    pC->m_pWriterVideoStream->decoderSpecificInfoSize   = sizeof(M4WRITER_StreamVideoInfos);
-
-    /**
-     * Update AU properties for video stream */
-    pC->m_WriterVideoAU.CTS         = pC->m_WriterVideoAU.DTS = 0;  /** Reset time */
-    pC->m_WriterVideoAU.size        = 0;
-    pC->m_WriterVideoAU.frag        = M4OSA_NULL;
-    pC->m_WriterVideoAU.nbFrag      = 0;                            /** No fragment */
-    pC->m_WriterVideoAU.stream      = pC->m_pWriterVideoStream;
-    pC->m_WriterVideoAU.attribute   = AU_RAP;
-    pC->m_WriterVideoAU.dataAddress = M4OSA_NULL;
-
-    /**
-     *  If there is an audio input, allocate and fill the audio stream structures for the writer */
-    if(M4OSA_NULL != pC->m_pReaderAudioStream)
-    {
-        pC->m_pWriterAudioStream =
-            (M4SYS_StreamDescription*)M4OSA_32bitAlignedMalloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
-            (M4OSA_Char *)"pWriterAudioStream");
-        if (M4OSA_NULL == pC->m_pWriterAudioStream)
-        {
-            M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterAudioStream, \
-                           returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->m_pWriterAudioStreamInfo =
-            (M4WRITER_StreamAudioInfos*)M4OSA_32bitAlignedMalloc(sizeof(M4WRITER_StreamAudioInfos), M4PTO3GPP,
-            (M4OSA_Char *)"pWriterAudioStreamInfo");
-        if (M4OSA_NULL == pC->m_pWriterAudioStreamInfo)
-        {
-            M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate \
-                           pWriterAudioStreamInfo, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-        pC->m_pWriterAudioStreamInfo->nbSamplesPerSec = 0; /**< unused by our shell writer */
-        pC->m_pWriterAudioStreamInfo->nbBitsPerSample = 0; /**< unused by our shell writer */
-        pC->m_pWriterAudioStreamInfo->nbChannels = 1;      /**< unused by our shell writer */
-
-        if( (M4OSA_NULL != pC->m_pReaderAudioStream) && /* audio could have been discarded */
-            (M4OSA_NULL != pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo) )
-        {
-            /* If we copy the stream from the input, we copy its DSI */
-            pC->m_pWriterAudioStreamInfo->Header.Size =
-                pC->m_pReaderAudioStream->m_basicProperties.m_decoderSpecificInfoSize;
-            pC->m_pWriterAudioStreamInfo->Header.pBuf =
-                (M4OSA_MemAddr8)pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo;
-        }
-        else
-        {
-            /* Writer will put a default DSI */
-            pC->m_pWriterAudioStreamInfo->Header.Size = 0;
-            pC->m_pWriterAudioStreamInfo->Header.pBuf = M4OSA_NULL;
-        }
-
-        /**
-         * Add the audio stream */
-        switch (pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
-        {
-            case M4DA_StreamTypeAudioAmrNarrowBand:
-                pC->m_pWriterAudioStream->streamType = M4SYS_kAMR;
-                break;
-            case M4DA_StreamTypeAudioAac:
-                pC->m_pWriterAudioStream->streamType = M4SYS_kAAC;
-                break;
-            case M4DA_StreamTypeAudioEvrc:
-                pC->m_pWriterAudioStream->streamType = M4SYS_kEVRC;
-                break;
-            default:
-                M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unhandled audio format (0x%x),\
-                               returning ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
-                               pC->m_pReaderAudioStream->m_basicProperties.m_streamType);
-                return ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT;
-        }
-
-        /*
-         * Fill Audio stream description structure for the AddStream method */
-        pC->m_pWriterAudioStream->streamID                  = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
-        pC->m_pWriterAudioStream->duration                  = 0;/**< Duration is not known yet */
-        pC->m_pWriterAudioStream->timeScale                 = M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE;
-        pC->m_pWriterAudioStream->profileLevel              = M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL;
-        pC->m_pWriterAudioStream->averageBitrate            =
-                                pC->m_pReaderAudioStream->m_basicProperties.m_averageBitRate;
-        pC->m_pWriterAudioStream->maxBitrate                =
-                                pC->m_pWriterAudioStream->averageBitrate;
-
-        /**
-         * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos \
-            in the DSI pointer... */
-        pC->m_pWriterAudioStream->decoderSpecificInfo =
-                    (M4OSA_MemAddr32)pC->m_pWriterAudioStreamInfo;
-
-        /**
-         * Update AU properties for audio stream */
-        pC->m_WriterAudioAU.CTS         = pC->m_WriterAudioAU.DTS = 0;  /** Reset time */
-        pC->m_WriterAudioAU.size        = 0;
-        pC->m_WriterAudioAU.frag        = M4OSA_NULL;
-        pC->m_WriterAudioAU.nbFrag      = 0;                            /** No fragment */
-        pC->m_WriterAudioAU.stream      = pC->m_pWriterAudioStream;
-        pC->m_WriterAudioAU.attribute   = AU_RAP;
-        pC->m_WriterAudioAU.dataAddress = M4OSA_NULL;
-    }
-
-    /************************************/
-    /******** Video Encoder Init ********/
-    /************************************/
-
-    /**
-     * PTO uses its own bitrate regulation, not the "true" core regulation */
-    EncParams.bInternalRegulation = M4OSA_TRUE; //M4OSA_FALSE;
-    EncParams.uiStartingQuantizerValue = M4PTO3GPP_QUANTIZER_STEP;
-
-    EncParams.videoProfile = pC->m_Params.videoProfile;
-    EncParams.videoLevel = pC->m_Params.videoLevel;
-
-    /**
-     * Other encoder settings */
-
-    EncParams.uiHorizontalSearchRange  = 0;             /* use default */
-    EncParams.uiVerticalSearchRange    = 0;             /* use default */
-    EncParams.bErrorResilience         = M4OSA_FALSE;   /* no error resilience */
-    EncParams.uiIVopPeriod             = 15;             /* use default */
-    EncParams.uiMotionEstimationTools  = 0;             /* M4V_MOTION_EST_TOOLS_ALL */
-    EncParams.bAcPrediction            = M4OSA_TRUE;    /* use AC prediction */
-    EncParams.bDataPartitioning        = M4OSA_FALSE;   /* no data partitioning */
-
-
-    /**
-     * Create video encoder */
-    err = pC->m_pEncoderInt->pFctInit(&pC->m_pMp4EncoderContext, pC->m_pWriterDataInt,
-                                    M4PTO3GPP_applyVPP, pC, pC->m_pEncoderExternalAPI,
-                                    pC->m_pEncoderUserData);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctInit returns 0x%x", err);
-        return err;
-    }
-
-    pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
-
-    err = pC->m_pEncoderInt->pFctOpen(pC->m_pMp4EncoderContext, &pC->m_WriterVideoAU, &EncParams);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctOpen returns 0x%x", err);
-        return err;
-    }
-
-    pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
-
-    if (M4OSA_NULL != pC->m_pEncoderInt->pFctStart)
-    {
-        err = pC->m_pEncoderInt->pFctStart(pC->m_pMp4EncoderContext);
-
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctStart returns 0x%x", err);
-            return err;
-        }
-    }
-
-    pC->m_eEncoderState = M4PTO3GPP_kEncoderRunning;
-
-    /**
-     * No more  setoption on "M4ENCODER_kVideoFragmentSize" here.
-     * It is now automaticly and "smartly" set in the encoder shell. */
-
-    /**************************************/
-    /******** 3GP out add streams  ********/
-    /**************************************/
-
-    err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterVideoStream);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(video) returns\
-                       0x%x", err);
-        return err;
-    }
-
-    /**
-     * Set video max au size */
-    optionValue.streamID    = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
-    optionValue.value = (M4OSA_UInt32)(1.5F * (M4OSA_Float)(pC->m_pWriterVideoStreamInfo->width
-                                                * pC->m_pWriterVideoStreamInfo->height)
-                                                * M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO);
-    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxAUSize: %u",optionValue.value);
-    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
-                                (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
-                       M4WRITER_kMaxAUSize) returns 0x%x", err);
-        return err;
-    }
-
-    /**
-     * Set video max chunck size */
-    optionValue.value = (M4OSA_UInt32)((M4OSA_Float)optionValue.value
-                        * M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO);
-    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxChunckSize: %u",optionValue.value);
-    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
-                        (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
-                       M4WRITER_kMaxChunckSize) returns 0x%x", err);
-        return err;
-    }
-
-    if (M4OSA_NULL != pC->m_pReaderAudioStream)
-    {
-        err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterAudioStream);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(audio) \
-                           returns 0x%x", err);
-            return err;
-        }
-
-        /**
-         * Set audio max au size */
-        optionValue.value       = M4PTO3GPP_AUDIO_MAX_AU_SIZE;
-        optionValue.streamID    = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
-        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
-                           M4WRITER_kMaxAUSize) returns 0x%x", err);
-            return err;
-        }
-
-        /**
-         * Set audio max chunck size */
-        optionValue.value = M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE; /**< Magical */
-        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
-                        (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
-                           M4WRITER_kMaxChunckSize) returns 0x%x", err);
-            return err;
-        }
-    }
-
-    /*
-     * Close the stream registering in order to be ready to write data */
-    err = pC->m_pWriterGlobInt->pFctStartWriting(pC->m_p3gpWriterContext);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctStartWriting returns 0x%x",
-                        err);
-        return err;
-    }
-
-
-    M4OSA_TRACE3_0("M4PTO3GPP_Ready4Processing: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
-                            M4WRITER_Context* pWriterContext,
-                                      M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
- * @brief   Write an AMR 12.2kbps silence FRAME into the writer
- * @note    Mainly used to fix the 'bzz' bug...
- * @param   pWriterDataIntInterface (IN)    writer data interfaces
- *          pWriterContext          (IN/OUT)writer context
- *          pWriterAudioAU          (OUT)   writer audio access unit
- *          mtIncCts                (IN)    writer CTS
- * @return  M4NO_ERROR: No error
- ******************************************************************************
-*/
-static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
-                                                   M4WRITER_Context* pWriterContext,
-                                                    M4SYS_AccessUnit* pWriterAudioAU,
-                                                    M4OSA_Time mtIncCts)
-{
-    M4OSA_ERR err;
-
-    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
-                                        pWriterAudioAU);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pStartAU(audio) returns \
-                                                    0x%x!", err);
-        return err;
-    }
-
-    memcpy((void *)pWriterAudioAU->dataAddress,
-     (void *)M4PTO3GPP_AMR_AU_SILENCE_122_FRAME, M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE);
-    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE;
-    pWriterAudioAU->CTS     = mtIncCts;
-    pWriterAudioAU->nbFrag  = 0;
-
-    err = pWriterDataIntInterface->pProcessAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
-                                                pWriterAudioAU);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pProcessAU(silence) \
-                       returns 0x%x!", err);
-        return err;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
-                                        M4WRITER_Context* pWriterContext,
-                                      M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
- * @brief   Write an AMR 12.2kbps silence FRAME into the writer
- * @note    Mainly used to fix the 'bzz' bug...
- * @param   pWriterDataIntInterface (IN)    writer data interfaces
- *          pWriterContext          (IN/OUT)writer context
- *          pWriterAudioAU          (OUT)   writer audio access unit
- *          mtIncCts                (IN)    writer CTS
- * @return  M4NO_ERROR: No error
- ******************************************************************************
-*/
-static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
-                                                   M4WRITER_Context* pWriterContext,
-                                                M4SYS_AccessUnit* pWriterAudioAU,
-                                                M4OSA_Time mtIncCts)
-{
-    M4OSA_ERR err;
-
-    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
-                                                        pWriterAudioAU);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: pWriterDataInt->pStartAU(audio)\
-                       returns 0x%x!", err);
-        return err;
-    }
-
-    memcpy((void *)pWriterAudioAU->dataAddress,
-                (void *)M4PTO3GPP_AMR_AU_SILENCE_048_FRAME,
-                M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
-    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-    pWriterAudioAU->CTS     = mtIncCts;
-    pWriterAudioAU->nbFrag  = 0;
-
-    err = pWriterDataIntInterface->pProcessAU(pWriterContext,
-                    M4PTO3GPP_WRITER_AUDIO_STREAM_ID, pWriterAudioAU);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: \
-                       pWriterDataInt->pProcessAU(silence) returns 0x%x!", err);
-        return err;
-    }
-
-    return M4NO_ERROR;
-}
-
-
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
deleted file mode 100755
index 96a6498..0000000
--- a/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4PTO3GPP_VideoPreProcessing.c
- * @brief   Picture to 3gpp Service video preprocessing management.
- ******************************************************************************
- */
-
-/**
- *    OSAL Debug utilities */
-#include "M4OSA_Debug.h"
-
-/**
- *    OSAL Memory management */
-#include "M4OSA_Memory.h"
-
-/**
- *    Definition of the M4PTO3GPP internal context */
-#include "M4PTO3GPP_InternalTypes.h"
-
-/**
- *    Definition of the M4PTO3GPP errors */
-#include "M4PTO3GPP_ErrorCodes.h"
-
-/* If time increment is too low then we have an infinite alloc loop into M4ViEncCaptureFrame() */
-/* Time increment should match 30 fps maximum */
-#define M4PTO3GPP_MIN_TIME_INCREMENT 33.3333334
-
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
- *                                 M4VIFI_ImagePlane* pPlaneOut)
- * @brief    Call an external callback to get the picture to encode
- * @note    It is called by the video encoder
- * @param    pContext    (IN) VPP context, which actually is the M4PTO3GPP internal context
- *                            in our case
- * @param    pPlaneIn    (IN) Contains the image
- * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
- *                        output YUV420 image read with the m_pPictureCallbackFct
- * @return    M4NO_ERROR:    No error
- * @return    Any error returned by an underlaying module
- ******************************************************************************
- */
-/******************************************************/
-M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
-                             M4VIFI_ImagePlane* pPlaneOut)
-/******************************************************/
-{
-    M4OSA_ERR    err;
-    M4OSA_Double mtDuration;
-    M4OSA_UInt32 i;
-
-    /*** NOTE ***/
-    /* It's OK to get pPlaneIn == M4OSA_NULL here                        */
-    /* since it has been given NULL in the pFctEncode() call.            */
-    /* It's because we use the M4PTO3GPP internal context to            */
-    /* transmit the encoder input data.                                    */
-    /* The input data is the image read from the m_pPictureCallbackFct    */
-
-    /**
-     *    The VPP context is actually the M4PTO3GPP context! */
-    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
-
-    /**
-    *  Get the picture to encode */
-    if (M4OSA_FALSE == pC->m_bLastInternalCallBack)
-    {
-        err = pC->m_Params.pPictureCallbackFct(pC->m_Params.pPictureCallbackCtxt, pPlaneOut,
-             &mtDuration);
-
-        /* In case of error when getting YUV to encode (ex: error when decoding a JPEG) */
-        if((M4NO_ERROR != err) && (((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) != err))
-        {
-            return err;
-        }
-
-        /**
-         * If end of encoding is asked by the size limitation system,
-         * we must end the encoding the same way that when it is asked by the
-         * picture callback (a.k.a. the integrator).
-         * Thus we simulate the LastPicture code return: */
-        if (M4OSA_TRUE == pC->m_IsLastPicture)
-        {
-            err = M4PTO3GPP_WAR_LAST_PICTURE;
-        }
-
-        if(((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) == err)
-        {
-            pC->m_bLastInternalCallBack = M4OSA_TRUE; /* Toggle flag for the final call of the CB*/
-            pC->m_IsLastPicture         = M4OSA_TRUE; /* To stop the encoder */
-            pC->pSavedPlane             = pPlaneOut;  /* Save the last YUV plane ptr */
-            pC->uiSavedDuration         = (M4OSA_UInt32)mtDuration; /* Save the last duration */
-        }
-    }
-    else
-    {
-        /**< Not necessary here because the last frame duration is set to the-last-but-one by
-                the light writer */
-        /**< Only necessary for pC->m_mtNextCts below...*/
-        mtDuration = pC->uiSavedDuration;
-
-
-        /** Copy the last YUV plane into the current one
-         * (the last pic is splited due to the callback extra-call... */
-        for (i=0; i<3; i++)
-        {
-            memcpy((void *)pPlaneOut[i].pac_data,
-                 (void *)pC->pSavedPlane[i].pac_data,
-                     pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
-        }
-    }
-
-    /* TimeIncrement should be 30 fps maximum */
-    if(mtDuration < M4PTO3GPP_MIN_TIME_INCREMENT)
-    {
-        mtDuration = M4PTO3GPP_MIN_TIME_INCREMENT;
-    }
-
-    pC->m_mtNextCts += mtDuration;
-
-    M4OSA_TRACE3_0("M4PTO3GPP_applyVPP: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/src/M4READER_Amr.c b/libvideoeditor/vss/src/M4READER_Amr.c
deleted file mode 100755
index 71f0e28..0000000
--- a/libvideoeditor/vss/src/M4READER_Amr.c
+++ /dev/null
@@ -1,790 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ************************************************************************
- * @file   M4READER_Amr.c
- * @brief  Generic encapsulation of the core amr reader
- * @note   This file implements the generic M4READER interface
- *         on top of the AMR reader
- ************************************************************************
-*/
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_CoreID.h"
-
-#include "M4_Utils.h"
-
-#include "M4AMRR_CoreReader.h"
-#include "M4READER_Amr.h"
-
-/**
- ************************************************************************
- * structure    M4READER_AMR_Context
- * @brief       This structure defines the internal context of a amr reader instance
- * @note        The context is allocated and de-allocated by the reader
- ************************************************************************
-*/
-typedef struct _M4READER_AMR_Context
-{
-    M4OSA_Context           m_pCoreContext;     /**< core amr reader context */
-    M4_AudioStreamHandler*  m_pAudioStream;     /**< pointer on the audio stream
-                                                 description returned by the core */
-    M4SYS_AccessUnit        m_audioAu;          /**< audio access unit to be filled by the core */
-    M4OSA_Time              m_maxDuration;      /**< duration of the audio stream */
-    M4OSA_FileReadPointer*    m_pOsaFileReaderFcts;    /**< OSAL file read functions */
-
-} M4READER_AMR_Context;
-
-
-/**
- ************************************************************************
- * @brief    create an instance of the reader
- * @note     allocates the context
- * @param    pContext:        (OUT)    pointer on a reader context
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_ALLOC                a memory allocation has failed
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_create(M4OSA_Context *pContext)
-{
-    M4READER_AMR_Context* pReaderContext;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
-         "M4READER_AMR_create: invalid context pointer");
-
-    pReaderContext = (M4READER_AMR_Context*)M4OSA_32bitAlignedMalloc(sizeof(M4READER_AMR_Context),
-         M4READER_AMR, (M4OSA_Char *)"M4READER_AMR_Context");
-    if (pReaderContext == M4OSA_NULL)
-    {
-        return M4ERR_ALLOC;
-    }
-
-    pReaderContext->m_pAudioStream  = M4OSA_NULL;
-    pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
-    pReaderContext->m_maxDuration = 0;
-    pReaderContext->m_pCoreContext = M4OSA_NULL;
-    pReaderContext->m_pOsaFileReaderFcts = M4OSA_NULL;
-
-    *pContext = pReaderContext;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief    destroy the instance of the reader
- * @note     after this call the context is invalid
- *
- * @param    context:        (IN)    Context of the reader
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_destroy(M4OSA_Context context)
-{
-    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
-
-    /* Check function parameters*/
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-         "M4READER_AMR_destroy: invalid context pointer");
-
-    /**
-     *    Check input parameter */
-    if (M4OSA_NULL == pC)
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_destroy(): M4READER_AMR_destroy: context is M4OSA_NULL,\
-             returning M4ERR_PARAMETER");
-        return M4ERR_PARAMETER;
-    }
-
-    free(pC);
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ************************************************************************
- * @brief    open the reader and initializes its created instance
- * @note     this function opens the AMR file
- * @param    context:            (IN)    Context of the reader
- * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying the media to open
- * @return    M4NO_ERROR                     there is no error
- * @return    M4ERR_PARAMETER                the context is NULL
- * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
-{
-    M4READER_AMR_Context*    pC = (M4READER_AMR_Context*)context;
-    M4OSA_ERR                err;
-
-    /* Check function parameters*/
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),              M4ERR_PARAMETER,
-         "M4READER_AMR_open: invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
-         "M4READER_AMR_open: invalid pointer pFileDescriptor");
-
-    err = M4AMRR_openRead( &pC->m_pCoreContext, pFileDescriptor, pC->m_pOsaFileReaderFcts);
-
-    return err;
-}
-
-
-
-/**
- ************************************************************************
- * @brief    close the reader
- * @note
- * @param    context:        (IN)    Context of the reader
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            the context is NULL
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR   M4READER_AMR_close(M4OSA_Context context)
-{
-    M4READER_AMR_Context*    pC = (M4READER_AMR_Context*)context;
-    M4OSA_ERR                err;
-    M4AMRR_State State;
-
-    /* Check function parameters*/
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-         "M4READER_AMR_close: invalid context pointer");
-
-    /**
-     *    Check input parameter */
-    if (M4OSA_NULL == pC)
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_close(): M4READER_AMR_close: context is M4OSA_NULL,\
-             returning M4ERR_PARAMETER");
-        return M4ERR_PARAMETER;
-    }
-
-    if (M4OSA_NULL != pC->m_pAudioStream)
-    {
-        err = M4AMRR_getState(pC->m_pCoreContext, &State,
-                ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId);
-        if(M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_0("M4READER_AMR_close: error when calling M4AMRR_getState\n");
-            return err;
-        }
-
-        if (M4AMRR_kReading_nextAU == State)
-        {
-            err = M4AMRR_freeAU(pC->m_pCoreContext,
-                ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId,  &pC->m_audioAu);
-            if (err != M4NO_ERROR)
-            {
-                M4OSA_TRACE1_0("M4READER_AMR_close: error when freeing access unit\n");
-                return err;
-            }
-        }
-
-        /* Delete the DSI if needed */
-        if(M4OSA_NULL != pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo)
-        {
-            free(\
-                pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo);
-
-            pC->m_pAudioStream->m_basicProperties.m_decoderSpecificInfoSize = 0;
-            pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
-        }
-
-        /* Finally destroy the stream handler */
-        free(pC->m_pAudioStream);
-        pC->m_pAudioStream = M4OSA_NULL;
-    }
-
-    if (M4OSA_NULL != pC->m_pCoreContext)
-    {
-        err = M4AMRR_closeRead(pC->m_pCoreContext);
-        pC->m_pCoreContext = M4OSA_NULL;
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief    Get the next stream found in the media
- * @note    current version needs to translate M4SYS_Stream to M4_StreamHandler
- *
- * @param    context:        (IN)   Context of the reader
- * @param    pMediaFamily:   (OUT)  pointer to a user allocated M4READER_MediaFamily
- *                                  that will be filled with the media family of the found stream
- * @param    pStreamHandler: (OUT)  pointer to a stream handler that will be
- *                                  allocated and filled with the found stream description
- *
- * @return    M4NO_ERROR            there is no error
- * @return    M4WAR_NO_MORE_STREAM  no more available stream in the media (all streams found)
- * @return    M4ERR_PARAMETER       at least one parameter is not properly set (in DEBUG mode only)
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
-                                     M4_StreamHandler **pStreamHandlerParam)
-{
-    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
-    M4OSA_ERR               err;
-    M4SYS_StreamID          streamIdArray[2];
-    M4SYS_StreamDescription streamDesc;
-    M4_AudioStreamHandler*  pAudioStreamHandler;
-    M4_StreamHandler*       pStreamHandler;
-
-    M4OSA_DEBUG_IF1((pC == 0),                  M4ERR_PARAMETER,
-                "M4READER_AMR_getNextStream: invalid context");
-    M4OSA_DEBUG_IF1((pMediaFamily == 0),        M4ERR_PARAMETER,
-                "M4READER_AMR_getNextStream: invalid pointer to MediaFamily");
-    M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
-                "M4READER_AMR_getNextStream: invalid pointer to StreamHandler");
-
-    err = M4AMRR_getNextStream( pC->m_pCoreContext, &streamDesc);
-    if (err == M4WAR_NO_MORE_STREAM)
-    {
-        streamIdArray[0] = 0;
-        streamIdArray[1] = 0;
-        err = M4AMRR_startReading(pC->m_pCoreContext, streamIdArray);
-        if ((M4OSA_UInt32)M4ERR_ALLOC == err)
-        {
-            M4OSA_TRACE2_0("M4READER_AMR_getNextStream: M4AMRR_startReading returns M4ERR_ALLOC!");
-            return err;
-        }
-        return M4WAR_NO_MORE_STREAM;
-    }
-    else if (err != M4NO_ERROR)
-    {
-        return err;
-    }
-
-    *pMediaFamily = M4READER_kMediaFamilyAudio;
-
-    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc(sizeof(M4_AudioStreamHandler),
-                        M4READER_AMR, (M4OSA_Char *)"M4_AudioStreamHandler");
-    if (pAudioStreamHandler == M4OSA_NULL)
-    {
-        return M4ERR_ALLOC;
-    }
-    pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
-    *pStreamHandlerParam = pStreamHandler;
-    pC->m_pAudioStream = pAudioStreamHandler;
-
-    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
-
-    /*
-     * Audio stream handler fields are initialised with 0 value.
-     * They will be properly set by the AMR decoder
-     */
-    pAudioStreamHandler->m_samplingFrequency = 0;
-    pAudioStreamHandler->m_byteFrameLength   = 0;
-    pAudioStreamHandler->m_byteSampleSize    = 0;
-    pAudioStreamHandler->m_nbChannels        = 0;
-
-    pStreamHandler->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
-    pStreamHandler->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
-    pStreamHandler->m_streamId                = streamDesc.streamID;
-    pStreamHandler->m_duration                = streamDesc.duration;
-    pStreamHandler->m_pUserData               = (void*)(intptr_t)streamDesc.timeScale; /*trick to change*/
-
-    if (streamDesc.duration > pC->m_maxDuration)
-    {
-        pC->m_maxDuration = streamDesc.duration;
-    }
-    pStreamHandler->m_averageBitRate          = streamDesc.averageBitrate;
-
-    M4AMRR_getmaxAUsize(pC->m_pCoreContext, &pStreamHandler->m_maxAUSize);
-
-    switch (streamDesc.streamType)
-    {
-    case M4SYS_kAMR:
-        pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrNarrowBand;
-        break;
-    case M4SYS_kAMR_WB:
-        pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrWideBand;
-        break;
-    default:
-        break;
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief    fill the access unit structure with initialization values
- * @note
- * @param    context:        (IN)     Context of the reader
- * @param    pStreamHandler: (IN)     pointer to the stream handler to
- *                                    which the access unit will be associated
- * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by the caller)
- *                                      to initialize
- *
- * @return    M4NO_ERROR              there is no error
- * @return    M4ERR_PARAMETER         at least one parameter is not properly set
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-                                     M4_AccessUnit *pAccessUnit)
-{
-    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
-    M4SYS_AccessUnit*       pAu;
-
-    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
-         "M4READER_AMR_fillAuStruct: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_AMR_fillAuStruct: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-         "M4READER_AMR_fillAuStruct: invalid pointer to M4_AccessUnit");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_fillAuStruct: passed StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-
-    pAu->dataAddress = M4OSA_NULL;
-    pAu->size        = 0;
-    /* JC: bug fix 1197 (set CTS to -20 in order the first AU CTS is 0) */
-    pAu->CTS         = -20;
-    pAu->DTS         = -20;
-    pAu->attribute   = 0;
-    pAu->nbFrag      = 0;
-
-    pAccessUnit->m_size         = 0;
-    /* JC: bug fix 1197 (set CTS to -20 in order the first AU CTS is 0) */
-    pAccessUnit->m_CTS          = -20;
-    pAccessUnit->m_DTS          = -20;
-    pAccessUnit->m_attribute    = 0;
-    pAccessUnit->m_dataAddress  = M4OSA_NULL;/*pBuffer;*/
-    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
-    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
-    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief    get an option value from the reader
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to retrieve a property value:
- *          - the duration of the longest stream of the media
- *          - the version number of the reader (not implemented yet)
- *
- * @param    context:        (IN)    Context of the reader
- * @param    optionId:        (IN)    indicates the option to get
- * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
- *                                       where option is stored
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_getOption(M4OSA_Context context, M4OSA_OptionID optionId,
-                                 M4OSA_DataOption pValue)
-
-{
-    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER, "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
-
-    switch(optionId)
-    {
-    case M4READER_kOptionID_Duration :
-        {
-            *(M4OSA_Time*)pValue = pC->m_maxDuration;
-        }
-        break;
-
-    case M4READER_kOptionID_Bitrate:
-        {
-            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
-            if (M4OSA_NULL != pC->m_pAudioStream)
-            {
-                *pBitrate = pC->m_pAudioStream->m_basicProperties.m_averageBitRate;
-            }
-            else
-            {
-                pBitrate = 0;
-                err = M4ERR_PARAMETER;
-            }
-
-        }
-        break;
-    case M4READER_kOptionID_Version:
-        {
-            err = M4AMRR_getVersion((M4_VersionInfo*)pValue);
-        }
-        break;
-
-    default :
-        {
-            err = M4ERR_PARAMETER;
-        }
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   set en option value of the readder
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to set a property value:
- *          - the OSAL file read functions
- *
- * @param   context:    (IN)        Context of the decoder
- * @param   optionId:   (IN)        Identifier indicating the option to set
- * @param   pValue:     (IN)        Pointer to structure or value (allocated by user)
- *                                  where option is stored
- *
- * @return  M4NO_ERROR              There is no error
- * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
- * @return  M4ERR_STATE             State automaton is not applied
- * @return  M4ERR_PARAMETER         The option parameter is invalid
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_setOption(M4OSA_Context context, M4OSA_OptionID optionId,
-                                 M4OSA_DataOption pValue)
-{
-    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER, "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
-
-    switch(optionId)
-    {
-    case M4READER_kOptionID_SetOsaFileReaderFctsPtr :
-        {
-            pC->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
-        }
-        break;
-    default :
-        {
-            err = M4ERR_PARAMETER;
-        }
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief    reset the stream, that is seek it to beginning and make it ready to be read
- * @note    this function is to be deprecated in next versions
- *
- * @param    context:        (IN)    Context of the reader
- * @param    pStreamHandler    (IN)    The stream handler of the stream to reset
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_ALLOC                there is no more memory available
- * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
- * @return    M4ERR_STATE    this function cannot be called now
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4WAR_INVALID_TIME        beginning of the stream can not be reached
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
-{
-    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
-    M4SYS_StreamID          streamIdArray[2];
-    M4OSA_ERR               err;
-    M4SYS_AccessUnit*       pAu;
-    M4OSA_Time              time64 = 0;
-    M4AMRR_State            State;
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_reset: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_AMR_reset: invalid pointer to M4_StreamHandler");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_reset: passed StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-
-    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
-    if (M4AMRR_kReading_nextAU == State)
-    {
-        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_0("M4READER_AMR_reset: error when freeing access unit\n");
-            return err;
-        }
-        pAu->dataAddress = M4OSA_NULL;
-    }
-
-    streamIdArray[0] = pStreamHandler->m_streamId;
-    streamIdArray[1] = 0;
-
-    err = M4NO_ERROR;
-
-    /* for reset during playback */
-    /* (set CTS to -20 in order the first AU CTS is 0) */
-    pAu->CTS = -20;
-    pAu->DTS = -20;
-
-    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_reset: error when calling M4AMRR_seek()\n");
-        return err;
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief    jump into the stream at the specified time
- * @note
- * @param    context:        (IN)     Context of the reader
- * @param    pStreamHandler    (IN)     the stream description of the stream to make jump
- * @param    pTime            (IN/OUT) IN:  the time to jump to (in ms)
- *                                     OUT: the time to which the stream really jumped
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- * @return    M4ERR_PARAMETER            at least one parameter is not properly set
- * @return    M4ERR_ALLOC                there is no more memory available
- * @return    M4WAR_INVALID_TIME        the time can not be reached
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-                             M4OSA_Int32* pTime)
-{
-    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
-    M4SYS_StreamID          streamIdArray[2];
-    M4OSA_ERR               err;
-    M4SYS_AccessUnit*       pAu;
-    M4OSA_Time              time64 = (M4OSA_Time)*pTime;
-    M4AMRR_State            State;
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_reset: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_AMR_reset: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_3GP_jump: invalid time pointer");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_jump: passed StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-
-    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
-    if (M4AMRR_kReading_nextAU == State)
-    {
-        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_0("M4READER_AMR_jump: error when freeing access unit\n");
-            return err;
-        }
-        pAu->dataAddress = M4OSA_NULL;
-    }
-
-    streamIdArray[0] = pStreamHandler->m_streamId;
-    streamIdArray[1] = 0;
-
-    pAu->CTS = time64;
-    pAu->DTS = time64;
-    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kNoRAPprevious, &time64);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_jump: error when calling M4AMRR_seek()\n");
-        return err;
-    }
-
-    *pTime = (M4OSA_Int32)time64;
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   Gets an access unit (AU) from the stream handler source.
- * @note    An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
- *          In the current version, we need to translate M4OSA_AccessUnit to M4_AccessUnit
- *
- * @param    context:        (IN)        Context of the reader
- * @param    pStreamHandler  (IN)        The stream handler of the stream to make jump
- * @param    pAccessUnit     (IN/OUT)    Pointer to an access unit to fill with read data (the au
-                                         structure is allocated by the user, and must be
-                                         initialized by calling M4READER_fillAuStruct_fct after
-                                         creation)
- * @return    M4NO_ERROR              there is no error
- * @return    M4ERR_BAD_CONTEXT       provided context is not a valid one
- * @return    M4ERR_PARAMETER         at least one parameter is not properly set
- * @return    M4ERR_ALLOC             memory allocation failed
- * @return    M4ERR_BAD_STREAM_ID     at least one of the stream Id. does not exist.
- * @return    M4WAR_NO_MORE_AU        there are no more access unit in the stream (end of stream)
- ************************************************************************
-*/
-M4OSA_ERR M4READER_AMR_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-                                M4_AccessUnit *pAccessUnit)
-{
-    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
-    M4OSA_ERR               err = M4NO_ERROR;
-    M4SYS_AccessUnit*       pAu;
-    M4_MediaTime            timeScale;
-    M4AMRR_State            State;
-
-    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
-         "M4READER_AMR_getNextAu: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_AMR_getNextAu: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-         "M4READER_AMR_getNextAu: invalid pointer to M4_AccessUnit");
-
-    /* keep trace of the allocated buffers in AU to be able to free them at destroy()
-       but be aware that system is risky and would need upgrade if more than
-       one video and one audio AU is needed */
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_AMR_getNextAu: passed StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-
-    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
-    if (M4AMRR_kReading_nextAU == State)
-    {
-        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_0("M4READER_AVI_getNextAu: error when freeing access unit\n");
-            return err;
-        }
-        pAu->dataAddress = M4OSA_NULL;
-    }
-
-    pAu->nbFrag = 0;
-    err = M4AMRR_nextAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
-
-    if (err == M4NO_ERROR)
-    {
-        timeScale = (M4OSA_Float)(M4OSA_Int32)(intptr_t)(pStreamHandler->m_pUserData)/1000;
-        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
-        pAccessUnit->m_size = pAu->size;
-        pAccessUnit->m_CTS  = (M4_MediaTime)pAu->CTS/*/timeScale*/;
-        pAccessUnit->m_DTS  = (M4_MediaTime)pAu->DTS/*/timeScale*/;
-        pAccessUnit->m_attribute = pAu->attribute;
-    }
-    else
-    {
-        pAccessUnit->m_size=0;
-    }
-
-    return err;
-}
-
-/**
-*************************************************************************
-* @brief Retrieves the generic interfaces implemented by the reader
-*
-* @param pMediaType          : Pointer on a M4READER_MediaType (allocated by the caller)
-*                              that will be filled with the media type supported by this reader
-* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface implemented
-*                              by this reader. The interface is a structure allocated by the function and must
-*                              be un-allocated by the caller.
-* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface implemented
-*                              by this reader. The interface is a structure allocated by the function and must
-*                              be un-allocated by the caller.
-*
-* @returns : M4NO_ERROR     if OK
-*            ERR_ALLOC      if an allocation failed
-*            ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
-*************************************************************************
-*/
-M4OSA_ERR   M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
-                                         M4READER_GlobalInterface **pRdrGlobalInterface,
-                                         M4READER_DataInterface **pRdrDataInterface)
-{
-    M4OSA_DEBUG_IF1((pMediaType == 0),          M4ERR_PARAMETER,
-         "M4READER_AMR_getInterfaces: invalid pointer to MediaType");
-    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
-         "M4READER_AMR_getInterfaces: invalid pointer to M4READER_GlobalInterface");
-    M4OSA_DEBUG_IF1((pRdrDataInterface == 0),   M4ERR_PARAMETER,
-         "M4READER_AMR_getInterfaces: invalid pointer to M4READER_DataInterface");
-
-    *pRdrGlobalInterface =
-         (M4READER_GlobalInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_GlobalInterface),
-             M4READER_AMR, (M4OSA_Char *)"M4READER_GlobalInterface" );
-    if (M4OSA_NULL == *pRdrGlobalInterface)
-    {
-        *pRdrDataInterface = M4OSA_NULL;
-        return M4ERR_ALLOC;
-    }
-    *pRdrDataInterface = (M4READER_DataInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_DataInterface),
-         M4READER_AMR, (M4OSA_Char *)"M4READER_DataInterface");
-    if (M4OSA_NULL == *pRdrDataInterface)
-    {
-        free(*pRdrGlobalInterface);
-        *pRdrGlobalInterface = M4OSA_NULL;
-        return M4ERR_ALLOC;
-    }
-
-    *pMediaType = M4READER_kMediaTypeAMR;
-
-    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_AMR_create;
-    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_AMR_destroy;
-    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_AMR_open;
-    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_AMR_close;
-    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_AMR_getOption;
-    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_AMR_setOption;
-    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_AMR_getNextStream;
-    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_AMR_fillAuStruct;
-    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_AMR_jump;
-    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_AMR_reset;
-    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
-
-    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_AMR_getNextAu;
-
-    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
diff --git a/libvideoeditor/vss/src/M4READER_Pcm.c b/libvideoeditor/vss/src/M4READER_Pcm.c
deleted file mode 100755
index 392367f..0000000
--- a/libvideoeditor/vss/src/M4READER_Pcm.c
+++ /dev/null
@@ -1,720 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file   M4READER_Wav.c
- * @brief  Generic encapsulation of the core pcm reader
- * @note   This file implements the generic M4READER interface
- *         on top of the PCM reader
- ************************************************************************
-*/
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_CoreID.h"
-#include "M4TOOL_VersionInfo.h"
-#include "M4PCMR_CoreReader.h"
-#include "M4READER_Pcm.h"
-/**
- ************************************************************************
- * structure    M4READER_WAV_Context
- * @brief       This structure defines the internal context of a wav reader instance
- * @note        The context is allocated and de-allocated by the reader
- ************************************************************************
- */
-typedef struct _M4READER_PCM_Context
-{
-    M4OSA_Context           m_coreContext;        /**< core wav reader context */
-    M4_StreamHandler*       m_pAudioStream;       /**< pointer on the audio stream description
-                                                        returned by the core */
-    M4SYS_AccessUnit        m_audioAu;            /**< audio access unit to be filled by the core */
-    M4OSA_FileReadPointer*  m_pOsaFileReaderFcts; /**< OSAL file read functions */
-
-} M4READER_PCM_Context;
-
-
-/**
- ************************************************************************
- * @brief   Creates a wav reader instance
- * @note    allocates the context
- * @param   pContext:            (OUT)  Pointer to a wav reader context
- * @return  M4NO_ERROR:                 there is no error
- * @return  M4ERR_ALLOC:                a memory allocation has failed
- * @return  M4ERR_PARAMETER:            at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_create(M4OSA_Context* pContext)
-{
-    M4READER_PCM_Context*   pReaderContext;
-
-    M4OSA_DEBUG_IF1((pContext == 0),       M4ERR_PARAMETER,
-         "M4READER_PCM_create: invalid context pointer");
-
-    pReaderContext = (M4READER_PCM_Context*)M4OSA_32bitAlignedMalloc(sizeof(M4READER_PCM_Context),
-         M4READER_WAV, (M4OSA_Char *)"M4READER_PCM_Context");
-    if (pReaderContext == M4OSA_NULL)
-    {
-        return M4ERR_ALLOC;
-    }
-
-    pReaderContext->m_coreContext         = M4OSA_NULL;
-    pReaderContext->m_pAudioStream        = M4OSA_NULL;
-    pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
-    pReaderContext->m_pOsaFileReaderFcts  = M4OSA_NULL;
-
-    *pContext = pReaderContext;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief   Destroy the instance of the reader
- * @note    the context is un-allocated
- * @param   context:         (IN) context of the network reader
- * @return  M4NO_ERROR:           there is no error
- * @return  M4ERR_PARAMETER:      at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_destroy(M4OSA_Context context)
-{
-    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-         "M4READER_PCM_destroy: invalid context pointer");
-
-    free(pC);
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief   Initializes the reader instance
- * @param   context:           (IN)    context of the network reader
- * @param   pFileDescriptor:   (IN)    Pointer to proprietary data identifying the media to open
- * @return  M4NO_ERROR:                there is no error
- * @return  M4ERR_PARAMETER:           at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
-{
-    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
-    M4OSA_ERR               err;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-         "M4READER_PCM_open: invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor),   M4ERR_PARAMETER,
-         "M4READER_PCM_open: invalid pointer pFileDescriptor");
-
-    err = M4PCMR_openRead(&(pC->m_coreContext), (M4OSA_Char*)pFileDescriptor,
-         pC->m_pOsaFileReaderFcts);
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief     close the reader
- * @note
- * @param     context:        (IN)    Context of the reader
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            the context is NULL
- * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_close(M4OSA_Context context)
-{
-    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
-    M4OSA_ERR               err;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-         "M4READER_PCM_close: invalid context pointer");
-
-    /* Free audio AU and audio stream */
-    if (M4OSA_NULL != pC->m_pAudioStream)
-    {
-        if (M4OSA_NULL != pC->m_audioAu.dataAddress)
-        {
-            err = M4PCMR_freeAU(pC->m_coreContext, pC->m_pAudioStream->m_streamId,
-                 &pC->m_audioAu);
-            if (err != M4NO_ERROR)
-            {
-                M4OSA_TRACE1_0("M4READER_PCM_close: Error when freeing audio access unit");
-                return err;
-            }
-        }
-        free(pC->m_pAudioStream);
-        pC->m_pAudioStream = M4OSA_NULL;
-    }
-
-
-    if (M4OSA_NULL != pC->m_coreContext)
-    {
-        /* Close tha PCM file */
-       err = M4PCMR_closeRead(pC->m_coreContext);
-       pC->m_coreContext = M4OSA_NULL;
-    }
-
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   set en option value of the reader
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to set a property value:
- * @param    context:        (IN)    Context of the reader
- * @param    optionId:       (IN)    indicates the option to set
- * @param    pValue:         (IN)    pointer to structure or value (allocated by user)
- *                                    where option is stored
- *
- * @return    M4NO_ERROR             there is no error
- * @return    M4ERR_BAD_CONTEXT      provided context is not a valid one
- * @return    M4ERR_PARAMETER        at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID    when the option ID is not a valid one
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_setOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
-{
-    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER,
-         "M4READER_PCM_setOption: invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
-         "M4READER_PCM_setOption: invalid value pointer");
-
-    switch(optionId)
-    {
-    case M4READER_kOptionID_SetOsaFileReaderFctsPtr :
-        {
-            pC->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
-        }
-        break;
-    default :
-        {
-            err = M4ERR_PARAMETER;
-        }
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   Retrieves the an option value from the reader, given an option ID.
- * @note    this function follows the set/get option mechanism described in OSAL 3.0
- *          it allows the caller to retrieve a property value:
- *
- * @param   context:  (IN) context of the network reader
- * @param   optionId: (IN) option identificator whose option value is to be retrieved.
- * @param   pValue:  (OUT) option value retrieved.
- *
- * @return  M4NO_ERROR:          there is no error
- * @return  M4ERR_PARAMETER:     at least one parameter is not properly set (in DEBUG only)
- * @return  M4ERR_BAD_OPTION_ID: the required option identificator is unknown
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_getOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
-{
-    M4READER_PCM_Context*   pContext = (M4READER_PCM_Context*)context;
-    M4OSA_ERR               err      = M4NO_ERROR;
-
-    /* no check of context at this level because some option does not need it */
-    M4OSA_DEBUG_IF1((pValue == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_getOption: invalid pointer on value");
-
-    switch (optionId)
-    {
-    case M4READER_kOptionID_Duration:
-        *((M4OSA_UInt32*)pValue) = pContext->m_pAudioStream->m_duration;
-        break;
-
-    case M4READER_kOptionID_Version:
-        err = M4PCMR_getVersion((M4_VersionInfo*)pValue);
-        break;
-
-    case M4READER_kOptionID_Copyright:
-        return M4ERR_NOT_IMPLEMENTED;
-        break;
-
-    case M4READER_kOptionID_Bitrate:
-        {
-            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
-            if (M4OSA_NULL != pContext->m_pAudioStream)
-            {
-                *pBitrate = pContext->m_pAudioStream->m_averageBitRate;
-            }
-            else
-            {
-                pBitrate = 0;
-                err = M4ERR_PARAMETER;
-            }
-        }
-        break;
-
-    default:
-        err = M4ERR_BAD_OPTION_ID;
-        M4OSA_TRACE1_0("M4READER_PCM_getOption: unsupported optionId");
-        break;
-    }
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   Get the next stream found in the media
- * @note
- *
- * @param   context:        (IN)  context of the network reader
- * @param   pMediaFamily:   (OUT) pointer to a user allocated M4READER_MediaFamily that will
- *                                be filled
- * @param   pStreamHandler: (OUT) pointer to a stream handler that will be allocated and filled
- *                                with the found stream description
- *
- * @return  M4NO_ERROR:       there is no error.
- * @return  M4ERR_PARAMETER:  at least one parameter is not properly set (in DEBUG only)
- * @return  M4WAR_NO_MORE_STREAM    no more available stream in the media (all streams found)
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
-                                     M4_StreamHandler **pStreamHandler)
-{
-    M4READER_PCM_Context*   pC=(M4READER_PCM_Context*)context;
-    M4OSA_ERR               err;
-/*    M4_StreamHandler*       pStreamHandler = M4OSA_NULL;*/
-    M4SYS_StreamDescription streamDesc;
-    M4_AudioStreamHandler*  pAudioStreamHandler;
-    M4OSA_Double            fDuration;
-    M4SYS_StreamID          streamIdArray[2];
-    M4PCMC_DecoderSpecificInfo* pDsi;
-
-    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
-         "M4READER_PCM_getNextStream: invalid context");
-    M4OSA_DEBUG_IF1((pMediaFamily == 0),   M4ERR_PARAMETER,
-         "M4READER_PCM_getNextStream: invalid pointer to MediaFamily");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_getNextStream: invalid pointer to StreamHandler");
-
-    err = M4PCMR_getNextStream( pC->m_coreContext, &streamDesc);
-    if (err == M4WAR_NO_MORE_STREAM)
-    {
-        streamIdArray[0] = 0;
-        streamIdArray[1] = 0;
-        err = M4PCMR_startReading(pC->m_coreContext, streamIdArray); /*to put in open function*/
-
-        return M4WAR_NO_MORE_STREAM;
-    }
-    else if (M4NO_ERROR != err)
-    {
-        return err; /*also return M4WAR_NO_MORE_STREAM*/
-    }
-
-    switch (streamDesc.streamType)
-    {
-        case M4SYS_kAudioUnknown:
-        case M4SYS_kPCM_16bitsS:
-        case M4SYS_kPCM_16bitsU:
-        case M4SYS_kPCM_8bitsU:
-            *pMediaFamily = M4READER_kMediaFamilyAudio;
-            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found audio stream");
-            break;
-        default:
-            *pMediaFamily = M4READER_kMediaFamilyUnknown;
-            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found UNKNOWN stream");
-            return M4NO_ERROR;
-    }
-
-    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc(sizeof(M4_AudioStreamHandler),
-         M4READER_WAV, (M4OSA_Char *)"M4_AudioStreamHandler");
-    if (pAudioStreamHandler == M4OSA_NULL)
-    {
-        return M4ERR_ALLOC;
-    }
-    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
-    pC->m_pAudioStream = (M4_StreamHandler*)(pAudioStreamHandler);
-
-    pDsi = (M4PCMC_DecoderSpecificInfo*)(streamDesc.decoderSpecificInfo);
-    M4OSA_DEBUG_IF1((pDsi == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_getNextStream: invalid decoder specific info in stream");
-
-    pAudioStreamHandler->m_samplingFrequency = pDsi->SampleFrequency;
-    pAudioStreamHandler->m_byteSampleSize    = (M4OSA_UInt32)(pDsi->BitsPerSample/8);
-    /* m_byteFrameLength is badly named: it is not in bytes but in samples number */
-    if(pAudioStreamHandler->m_samplingFrequency == 8000)
-    {
-        /* AMR case */
-        pAudioStreamHandler->m_byteFrameLength   =
-             (((streamDesc.averageBitrate/8)/50)/pDsi->nbChannels)\
-                /pAudioStreamHandler->m_byteSampleSize;/*/50 to get around 20 ms of audio*/
-    }
-    else
-    {
-        /* AAC Case */
-        pAudioStreamHandler->m_byteFrameLength =
-             (M4OSA_UInt32)(((streamDesc.averageBitrate/8)/15.625)/pDsi->nbChannels)\
-                /pAudioStreamHandler->m_byteSampleSize;
-    }
-
-    pAudioStreamHandler->m_nbChannels        = pDsi->nbChannels;
-
-    M4OSA_TIME_TO_MS( fDuration, streamDesc.duration, streamDesc.timeScale);
-    pC->m_pAudioStream->m_duration                = (M4OSA_Int32)fDuration;
-    pC->m_pAudioStream->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
-    pC->m_pAudioStream->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
-    pC->m_pAudioStream->m_streamId                = streamDesc.streamID;
-    pC->m_pAudioStream->m_pUserData               =
-        (void*)(intptr_t)streamDesc.timeScale; /*trick to change*/
-    pC->m_pAudioStream->m_averageBitRate          = streamDesc.averageBitrate;
-    pC->m_pAudioStream->m_maxAUSize               =
-         pAudioStreamHandler->m_byteFrameLength*pAudioStreamHandler->m_byteSampleSize\
-            *pAudioStreamHandler->m_nbChannels;
-    pC->m_pAudioStream->m_streamType              = M4DA_StreamTypeAudioPcm;
-
-    *pStreamHandler = pC->m_pAudioStream;
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   fill the access unit structure with initialization values
- * @note
- *
- * @param   context:        (IN) context of the network reader
- * @param   pStreamHandler: (IN) pointer to the stream handler to which the access unit will
- *                                 be associated
- * @param   pAccessUnit:    (IN) pointer to the access unit(allocated by the caller) to initialize
- * @return  M4NO_ERROR:       there is no error.
- * @return  M4ERR_PARAMETER:  at least one parameter is not properly set (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-                                     M4_AccessUnit *pAccessUnit)
-{
-    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
-    M4SYS_AccessUnit*       pAu;
-
-    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
-         "M4READER_PCM_fillAuStruct: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_fillAuStruct: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-         "M4READER_PCM_fillAuStruct: invalid pointer to M4_AccessUnit");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_PCM_fillAuStruct: passed StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-
-    pAu->dataAddress = M4OSA_NULL;
-    pAu->size        = 0;
-    pAu->CTS         = 0;
-    pAu->DTS         = 0;
-    pAu->attribute   = 0;
-    pAu->nbFrag      = 0;
-
-    pAccessUnit->m_size         = 0;
-    pAccessUnit->m_CTS          = 0;
-    pAccessUnit->m_DTS          = 0;
-    pAccessUnit->m_attribute    = 0;
-    pAccessUnit->m_dataAddress  = M4OSA_NULL;/*pBuffer;*/
-    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
-    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
-    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * @brief   reset the stream, that is: seek it to beginning and make it ready to be read
- * @note
- * @param   context:        (IN) context of the network reader
- * @param   pStreamHandler: (IN) The stream handler of the stream to reset
- * @return  M4NO_ERROR: there is no error.
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
-{
-    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
-    M4SYS_StreamID          streamIdArray[2];
-    M4OSA_ERR               err;
-    M4SYS_AccessUnit*       pAu;
-    M4OSA_Time                time64 = 0;
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_reset: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_reset: invalid pointer to M4_StreamHandler");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_PCM_reset: passed StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-
-    if (pAu->dataAddress != M4OSA_NULL)
-    {
-        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_0("M4READER_PCM_reset: error when freeing access unit");
-            return err;
-        }
-        pAu->dataAddress = M4OSA_NULL;
-    }
-
-    streamIdArray[0] = pStreamHandler->m_streamId;
-    streamIdArray[1] = 0;
-
-    pAu->CTS = 0;
-    pAu->DTS = 0;
-
-    /* This call is needed only when replay during playback */
-    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
-
-    return err;
-}
-
-/**
- ************************************************************************
- * @brief   Get the next access unit of the specified stream
- * @note
- * @param   context:        (IN)        Context of the reader
- * @param   pStreamHandler  (IN)        The stream handler of the stream to make jump
- * @param   pAccessUnit     (IN/OUT)    Pointer to an access unit to fill with read data
- *                                      (the au structure is allocated by the user, and must be
- *                                        initialized
- *                                      by calling M4READER_fillAuStruct_fct after creation)
- * @return  M4NO_ERROR                  there is no error
- * @return  M4ERR_BAD_CONTEXT           provided context is not a valid one
- * @return  M4ERR_PARAMETER             at least one parameter is not properly set
- * @returns M4ERR_ALLOC                 memory allocation failed
- * @returns M4ERR_BAD_STREAM_ID         at least one of the stream Id. does not exist.
- * @returns M4WAR_NO_DATA_YET           there is no enough data on the stream for new access unit
- * @returns M4WAR_NO_MORE_AU            there are no more access unit in the stream (end of stream)
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-                                 M4_AccessUnit *pAccessUnit)
-{
-    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
-    M4OSA_ERR               err = M4NO_ERROR;
-    M4SYS_AccessUnit*       pAu;
-
-    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
-         "M4READER_PCM_getNextAu: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_getNextAu: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-         "M4READER_PCM_getNextAu: invalid pointer to M4_AccessUnit");
-
-    /* keep trace of the allocated buffers in AU to be able to free them at destroy()
-       but be aware that system is risky and would need upgrade if more than
-       one video and one audio AU is needed */
-    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_PCM_getNextAu: passed StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-
-    if (pAu->dataAddress != M4OSA_NULL)
-    {
-        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_0("M4READER_PCM_getNextAu: error when freeing access unit");
-            return err;
-        }
-    }
-
-    pAu->nbFrag = 0;
-    err = M4PCMR_nextAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
-
-    if (err == M4NO_ERROR)
-    {
-        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
-        pAccessUnit->m_size = pAu->size;
-        pAccessUnit->m_CTS  = (M4OSA_Double)pAu->CTS;
-        pAccessUnit->m_DTS  = (M4OSA_Double)pAu->DTS;
-        pAccessUnit->m_attribute = pAu->attribute;
-    }
-    else
-    {
-        pAccessUnit->m_size=0;
-    }
-
-    return err;
-}
-
-
-/**
- ************************************************************************
- * @brief   jump into the stream at the specified time
- * @note
- * @param   context:        (IN)     Context of the reader
- * @param   pStreamHandler  (IN)     the stream handler of the stream to make jump
- * @param   pTime           (IN/OUT) IN:  the time to jump to (in ms)
- *                                   OUT: the time to which the stream really jumped
- *                                        But in this reader, we do not modify the time
- * @return  M4NO_ERROR              there is no error
- * @return  M4ERR_BAD_CONTEXT       provided context is not a valid one
- * @return  M4ERR_PARAMETER         at least one parameter is not properly set
- * @return  M4ERR_ALLOC             there is no more memory available
- * @return  M4ERR_BAD_STREAM_ID     the streamID does not exist
- ************************************************************************
- */
-M4OSA_ERR M4READER_PCM_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
-     M4OSA_Int32* pTime)
-{
-    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
-    M4SYS_StreamID          streamIdArray[2];
-    M4OSA_ERR               err;
-    M4SYS_AccessUnit*       pAu;
-    M4OSA_Time                time64;
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_jump: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid time pointer");
-
-    time64 = (M4OSA_Time)*pTime;
-
-    if (pStreamHandler == pC->m_pAudioStream)
-    {
-        pAu = &pC->m_audioAu;
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4READER_PCM_jump: passed StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-
-    if (pAu->dataAddress != M4OSA_NULL)
-    {
-        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_0("M4READER_PCM_jump: Error when freeing access unit");
-            return err;
-        }
-        pAu->dataAddress = M4OSA_NULL;
-    }
-
-    streamIdArray[0] = pStreamHandler->m_streamId;
-    streamIdArray[1] = 0;
-
-    pAu->CTS = time64;
-    pAu->DTS = time64;
-
-    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
-
-    *pTime = (M4OSA_Int32)time64;
-
-    return err;
-}
-
-/**
- *************************************************************************
- * @brief Retrieves the generic interfaces implemented by the reader
- *
- * @param pMediaType          : Pointer on a M4READER_MediaType (allocated by the caller)
- *                              that will be filled with the media type supported by this reader
- * @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
- *                              implemented by this reader. The interface is a structure allocated
- *                              by the function and must be un-allocated by the caller.
- * @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
- *                              implemented by this reader. The interface is a structure allocated
- *                              by the function and must be un-allocated by the caller.
- *
- * @returns : M4NO_ERROR     if OK
- *            ERR_ALLOC      if an allocation failed
- *            ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
- *************************************************************************
- */
-M4OSA_ERR   M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
-                                       M4READER_GlobalInterface **pRdrGlobalInterface,
-                                       M4READER_DataInterface **pRdrDataInterface)
-/************************************************************************/
-{
-    M4OSA_DEBUG_IF1((pMediaType == 0),          M4ERR_PARAMETER,
-         "M4READER_PCM_getInterfaces: invalid pointer to MediaType passed");
-    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
-         "M4READER_PCM_getInterfaces: invalid pointer to M4READER_GlobalInterface");
-    M4OSA_DEBUG_IF1((pRdrDataInterface == 0),   M4ERR_PARAMETER,
-         "M4READER_PCM_getInterfaces: invalid pointer to M4READER_DataInterface");
-
-    *pRdrGlobalInterface =
-         (M4READER_GlobalInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_GlobalInterface), M4READER_WAV,
-             (M4OSA_Char *)"M4READER_PCM GlobalInterface");
-    if (M4OSA_NULL == *pRdrGlobalInterface)
-    {
-        return M4ERR_ALLOC;
-    }
-    *pRdrDataInterface =
-         (M4READER_DataInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_DataInterface), M4READER_WAV,
-            (M4OSA_Char *) "M4READER_PCM DataInterface");
-    if (M4OSA_NULL == *pRdrDataInterface)
-    {
-        free(*pRdrGlobalInterface);
-        return M4ERR_ALLOC;
-    }
-
-    *pMediaType = M4READER_kMediaTypePCM;
-
-    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_PCM_create;
-    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_PCM_destroy;
-    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_PCM_open;
-    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_PCM_close;
-    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_PCM_getOption;
-    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_PCM_setOption;
-    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_PCM_getNextStream;
-    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_PCM_fillAuStruct;
-    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_PCM_jump;
-    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_PCM_reset;
-    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
-
-    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_PCM_getNextAu;
-
-    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-
diff --git a/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
deleted file mode 100755
index fb83952..0000000
--- a/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <inttypes.h>
-
-#include "utils/Log.h"
-#include "M4OSA_Types.h"
-#include "M4OSA_Debug.h"
-
-#include "M4VD_EXTERNAL_Interface.h"
-#include "M4VD_Tools.h"
-#include "M4_VideoEditingCommon.h"
-#include "OMX_Video.h"
-/**
- ************************************************************************
- * @file   M4VD_EXTERNAL_BitstreamParser.c
- * @brief
- * @note   This file implements external Bitstream parser
- ************************************************************************
- */
-
-typedef struct {
-    M4OSA_UInt8 code;
-    M4OSA_Int32 profile;
-    M4OSA_Int32 level;
-} codeProfileLevel;
-
-static codeProfileLevel mpeg4ProfileLevelTable[] = {
-    {0x01, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level1},
-    {0x02, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level2},
-    {0x03, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level3},
-    {0x04, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level4a},
-    {0x05, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level5},
-    {0x08, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level0},
-    {0x11, OMX_VIDEO_MPEG4ProfileSimpleScalable,OMX_VIDEO_MPEG4Level1},
-    {0x12, OMX_VIDEO_MPEG4ProfileSimpleScalable,OMX_VIDEO_MPEG4Level2},
-    {0x21, OMX_VIDEO_MPEG4ProfileCore, OMX_VIDEO_MPEG4Level1},
-    {0x22, OMX_VIDEO_MPEG4ProfileCore, OMX_VIDEO_MPEG4Level2},
-    {0x32, OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level2},
-    {0x33, OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level3},
-    {0x34, OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level4},
-    {0x42, OMX_VIDEO_MPEG4ProfileNbit, OMX_VIDEO_MPEG4Level2},
-    {0x51, OMX_VIDEO_MPEG4ProfileScalableTexture, OMX_VIDEO_MPEG4Level1},
-    {0x61, OMX_VIDEO_MPEG4ProfileSimpleFace, OMX_VIDEO_MPEG4Level1},
-    {0x62, OMX_VIDEO_MPEG4ProfileSimpleFace, OMX_VIDEO_MPEG4Level2},
-    {0x71, OMX_VIDEO_MPEG4ProfileBasicAnimated, OMX_VIDEO_MPEG4Level1},
-    {0x72, OMX_VIDEO_MPEG4ProfileBasicAnimated, OMX_VIDEO_MPEG4Level2},
-    {0x81, OMX_VIDEO_MPEG4ProfileHybrid, OMX_VIDEO_MPEG4Level1},
-    {0x82, OMX_VIDEO_MPEG4ProfileHybrid, OMX_VIDEO_MPEG4Level2},
-    {0x91, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level1},
-    {0x92, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level2},
-    {0x93, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level3},
-    {0x94, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level4},
-    {0xa1, OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level1},
-    {0xa2, OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level2},
-    {0xa3, OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level3},
-    {0xb1, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level1},
-    {0xb2, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level2},
-    {0xb3, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level3},
-    {0xb4, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level4},
-    {0xc1, OMX_VIDEO_MPEG4ProfileAdvancedCore, OMX_VIDEO_MPEG4Level1},
-    {0xc2, OMX_VIDEO_MPEG4ProfileAdvancedCore, OMX_VIDEO_MPEG4Level2},
-    {0xd1, OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level1},
-    {0xd2, OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level2},
-    {0xd3, OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level3},
-    {0xf0, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level0},
-    {0xf1, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level1},
-    {0xf2, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level2},
-    {0xf3, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level3},
-    {0xf4, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level4},
-    {0xf5, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level5}
-};
-
-M4OSA_UInt32 M4VD_EXTERNAL_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
-     M4OSA_UInt32 nb_bits)
-{
-        return(M4VD_Tools_GetBitsFromMemory(parsingCtxt,nb_bits));
-}
-
-M4OSA_ERR M4VD_EXTERNAL_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
-                                                 M4OSA_MemAddr32 dest_bits,
-                                                 M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
-{
-        return (M4VD_Tools_WriteBitsToMemory( bitsToWrite,dest_bits,
-                                                offset,  nb_bits));
-}
-
-M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
-                                             M4DECODER_MPEG4_DecoderConfigInfo* pDci,
-                                             M4DECODER_VideoSize* pVideoSize)
-{
-    M4VS_Bitstream_ctxt parsingCtxt;
-    M4OSA_UInt32 code, j;
-    M4OSA_MemAddr8 start;
-    M4OSA_UInt8 i;
-    M4OSA_UInt32 time_incr_length;
-    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
-
-    /* Parsing variables */
-    M4OSA_UInt8 video_object_layer_shape = 0;
-    M4OSA_UInt8 sprite_enable = 0;
-    M4OSA_UInt8 reduced_resolution_vop_enable = 0;
-    M4OSA_UInt8 scalability = 0;
-    M4OSA_UInt8 enhancement_type = 0;
-    M4OSA_UInt8 complexity_estimation_disable = 0;
-    M4OSA_UInt8 interlaced = 0;
-    M4OSA_UInt8 sprite_warping_points = 0;
-    M4OSA_UInt8 sprite_brightness_change = 0;
-    M4OSA_UInt8 quant_precision = 0;
-
-    /* Fill the structure with default parameters */
-    pVideoSize->m_uiWidth              = 0;
-    pVideoSize->m_uiHeight             = 0;
-
-    pDci->uiTimeScale          = 0;
-    pDci->uiProfile            = 0;
-    pDci->uiUseOfResynchMarker = 0;
-    pDci->bDataPartition       = M4OSA_FALSE;
-    pDci->bUseOfRVLC           = M4OSA_FALSE;
-
-    /* Reset the bitstream context */
-    parsingCtxt.stream_byte = 0;
-    parsingCtxt.stream_index = 8;
-    parsingCtxt.in = (M4OSA_Int8 *)pVol;
-
-    start = (M4OSA_Int8 *)pVol;
-
-    /* Start parsing */
-    while (parsingCtxt.in - start < aVolSize)
-    {
-        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-        if (code == 0)
-        {
-            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-            if (code == 0)
-            {
-                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-                if (code == 1)
-                {
-                    /* start code found */
-                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-
-                    /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
-
-                    if ((code > 0x1F) && (code < 0x30))
-                    {
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* random accessible vol */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 8);/* video object type indication */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* is object layer identifier */
-                        if (code == 1)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     4); /* video object layer verid */
-                            vol_verid = (M4OSA_UInt8)code;
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     3); /* video object layer priority */
-                        }
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 4);/* aspect ratio */
-                        if (code == 15)
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     16); /* par_width and par_height (8+8) */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* vol control parameters */
-                        if (code == 1)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     3);/* chroma format + low delay (3+1) */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* vbv parameters */
-                            if (code == 1)
-                            {
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         32);/* first and latter half bitrate + 2 marker bits
-                                            (15 + 1 + 15 + 1) */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         31);/* first and latter half vbv buffer size + first
-                                          half vbv occupancy + marker bits (15+1+3+11+1)*/
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         16);/* first half vbv occupancy + marker bits (15+1)*/
-                            }
-                        }
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 2); /* video object layer shape */
-                        /* Need to save it for vop parsing */
-                        video_object_layer_shape = (M4OSA_UInt8)code;
-
-                        if (code != 0) return 0; /* only rectangular case supported */
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1); /* Marker bit */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 16); /* VOP time increment resolution */
-                        pDci->uiTimeScale = code;
-
-                        /* Computes time increment length */
-                        j    = code - 1;
-                        for (i = 0; (i < 32) && (j != 0); j >>=1)
-                        {
-                            i++;
-                        }
-                        time_incr_length = (i == 0) ? 1 : i;
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* Marker bit */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* Fixed VOP rate */
-                        if (code == 1)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     time_incr_length);/* Fixed VOP time increment */
-                        }
-
-                        if(video_object_layer_shape != 1) /* 1 = Binary */
-                        {
-                            if(video_object_layer_shape == 0) /* 0 = rectangular */
-                            {
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         13);/* Width */
-                                pVideoSize->m_uiWidth = code;
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         13);/* Height */
-                                pVideoSize->m_uiHeight = code;
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                            }
-                        }
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* interlaced */
-                        interlaced = (M4OSA_UInt8)code;
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                 1);/* OBMC disable */
-
-                        if(vol_verid == 1)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* sprite enable */
-                            sprite_enable = (M4OSA_UInt8)code;
-                        }
-                        else
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     2);/* sprite enable */
-                            sprite_enable = (M4OSA_UInt8)code;
-                        }
-                        if ((sprite_enable == 1) || (sprite_enable == 2))
-                        /* Sprite static = 1 and Sprite GMC = 2 */
-                        {
-                            if (sprite_enable != 2)
-                            {
-
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         13);/* sprite width */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         13);/* sprite height */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         13);/* sprite l coordinate */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         13);/* sprite top coordinate */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* Marker bit */
-                            }
-
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     6);/* sprite warping points */
-                            sprite_warping_points = (M4OSA_UInt8)code;
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     2);/* sprite warping accuracy */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* sprite brightness change */
-                            sprite_brightness_change = (M4OSA_UInt8)code;
-                            if (sprite_enable != 2)
-                            {
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                             1);/* low latency sprite enable */
-                            }
-                        }
-                        if ((vol_verid != 1) && (video_object_layer_shape != 0))
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* sadct disable */
-                        }
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1); /* not 8 bits */
-                        if (code)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     4);/* quant precision */
-                            quant_precision = (M4OSA_UInt8)code;
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         4);/* bits per pixel */
-                        }
-
-                        /* greyscale not supported */
-                        if(video_object_layer_shape == 3)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     3); /* nogray quant update + composition method +
-                                            linear composition */
-                        }
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* quant type */
-                        if (code)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* load intra quant mat */
-                            if (code)
-                            {
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);/* */
-                                 i    = 1;
-                                while (i < 64)
-                                {
-                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-                                    if (code == 0)
-                                        break;
-                                    i++;
-                                }
-                            }
-
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                         1);/* load non intra quant mat */
-                            if (code)
-                            {
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);/* */
-                                 i    = 1;
-                                while (i < 64)
-                                {
-                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-                                    if (code == 0)
-                                        break;
-                                    i++;
-                                }
-                            }
-                        }
-
-                        if (vol_verid != 1)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* quarter sample */
-                        }
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* complexity estimation disable */
-                        complexity_estimation_disable = (M4OSA_UInt8)code;
-                        if (!code)
-                        {
-                            //return M4ERR_NOT_IMPLEMENTED;
-                        }
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* resync marker disable */
-                        pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
-                                     1);/* data partitionned */
-                        pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
-                        if (code)
-                        {
-                            /* reversible VLC */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                            pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
-                        }
-
-                        if (vol_verid != 1)
-                        {
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* newpred */
-                            if (code)
-                            {
-                                //return M4ERR_PARAMETER;
-                            }
-                            /* reduced resolution vop enable */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                            reduced_resolution_vop_enable = (M4OSA_UInt8)code;
-                        }
-
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* scalability */
-                        scalability = (M4OSA_UInt8)code;
-                        if (code)
-                        {
-                            /* hierarchy type */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                            b_hierarchy_type = (M4OSA_UInt8)code;
-                            /* ref layer id */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
-                            /* ref sampling direct */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                            /* hor sampling factor N */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                            /* hor sampling factor M */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                            /* vert sampling factor N */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                            /* vert sampling factor M */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                            /* enhancement type */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                            enhancement_type = (M4OSA_UInt8)code;
-                            if ((!b_hierarchy_type) && (video_object_layer_shape == 1))
-                            {
-                                /* use ref shape */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                                /* use ref texture */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                                /* shape hor sampling factor N */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                                /* shape hor sampling factor M */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                                /* shape vert sampling factor N */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                                /* shape vert sampling factor M */
-                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
-                            }
-                        }
-                        break;
-                    }
-
-                    /* ----- 0xB0 : visual_object_sequence_start_code ----- */
-
-                    else if(code == 0xB0)
-                    {
-                        /* profile_and_level_indication */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
-                        pDci->uiProfile = (M4OSA_UInt8)code;
-                    }
-
-                    /* ----- 0xB5 : visual_object_start_code ----- */
-
-                    else if(code == 0xB5)
-                    {
-                        /* is object layer identifier */
-                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
-                        if (code == 1)
-                        {
-                             /* visual object verid */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
-                            vol_verid = (M4OSA_UInt8)code;
-                             /* visual object layer priority */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 3);
-                        }
-                        else
-                        {
-                             /* Realign on byte */
-                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 7);
-                            vol_verid = 1;
-                        }
-                    }
-
-                    /* ----- end ----- */
-                }
-                else
-                {
-                    if ((code >> 2) == 0x20)
-                    {
-                        /* H263 ...-> wrong*/
-                        break;
-                    }
-                }
-            }
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR getAVCProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
-                      M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel) {
-
-    M4OSA_UInt16 index = 28; /* the 29th byte is SPS start */
-    M4OSA_Bool constraintSet3;
-
-    if ((pProfile == M4OSA_NULL) || (pLevel == M4OSA_NULL)) {
-        return M4ERR_PARAMETER;
-    }
-
-    if ((DSISize <= index) || (pDSI == M4OSA_NULL)) {
-        ALOGE("getAVCProfileAndLevel: DSI is invalid");
-        *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-        *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-        return M4ERR_PARAMETER;
-    }
-
-    constraintSet3 = (pDSI[index+2] & 0x10);
-    ALOGV("getAVCProfileAndLevel profile_byte %d, level_byte: %d constrain3flag: %d",
-          pDSI[index+1], pDSI[index+3], constraintSet3);
-
-    switch (pDSI[index+1]) {
-        case 66:
-            *pProfile = OMX_VIDEO_AVCProfileBaseline;
-            break;
-        case 77:
-            *pProfile = OMX_VIDEO_AVCProfileMain;
-            break;
-        case 88:
-            *pProfile = OMX_VIDEO_AVCProfileExtended;
-            break;
-        case 100:
-            *pProfile = OMX_VIDEO_AVCProfileHigh;
-            break;
-        case 110:
-            *pProfile = OMX_VIDEO_AVCProfileHigh10;
-            break;
-        case 122:
-            *pProfile = OMX_VIDEO_AVCProfileHigh422;
-            break;
-        case 244:
-            *pProfile = OMX_VIDEO_AVCProfileHigh444;
-            break;
-        default:
-            *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-    }
-
-    switch (pDSI[index+3]) {
-        case 10:
-            *pLevel = OMX_VIDEO_AVCLevel1;
-            break;
-        case 11:
-            if (constraintSet3)
-                *pLevel = OMX_VIDEO_AVCLevel1b;
-            else
-                *pLevel = OMX_VIDEO_AVCLevel11;
-            break;
-        case 12:
-            *pLevel = OMX_VIDEO_AVCLevel12;
-            break;
-        case 13:
-            *pLevel = OMX_VIDEO_AVCLevel13;
-            break;
-        case 20:
-            *pLevel = OMX_VIDEO_AVCLevel2;
-            break;
-        case 21:
-            *pLevel = OMX_VIDEO_AVCLevel21;
-            break;
-        case 22:
-            *pLevel = OMX_VIDEO_AVCLevel22;
-            break;
-        case 30:
-            *pLevel = OMX_VIDEO_AVCLevel3;
-            break;
-        case 31:
-            *pLevel = OMX_VIDEO_AVCLevel31;
-            break;
-        case 32:
-            *pLevel = OMX_VIDEO_AVCLevel32;
-            break;
-        case 40:
-            *pLevel = OMX_VIDEO_AVCLevel4;
-            break;
-        case 41:
-            *pLevel = OMX_VIDEO_AVCLevel41;
-            break;
-        case 42:
-            *pLevel = OMX_VIDEO_AVCLevel42;
-            break;
-        case 50:
-            *pLevel = OMX_VIDEO_AVCLevel5;
-            break;
-        case 51:
-            *pLevel = OMX_VIDEO_AVCLevel51;
-            break;
-        default:
-            *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-    }
-    ALOGV("getAVCProfileAndLevel profile %" PRId32 " level %" PRId32,
-          *pProfile, *pLevel);
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR getH263ProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
-                      M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel) {
-
-    M4OSA_UInt16 index = 7; /* the 5th and 6th bytes contain the level and profile */
-
-    if ((pProfile == M4OSA_NULL) || (pLevel == M4OSA_NULL)) {
-        ALOGE("getH263ProfileAndLevel invalid pointer for pProfile");
-        return M4ERR_PARAMETER;
-    }
-
-    if ((DSISize < index) || (pDSI == M4OSA_NULL)) {
-        ALOGE("getH263ProfileAndLevel: DSI is invalid");
-        *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-        *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-        return M4ERR_PARAMETER;
-    }
-    ALOGV("getH263ProfileAndLevel profile_byte %d, level_byte %d",
-          pDSI[6], pDSI[5]);
-    /* get the H263 level */
-    switch (pDSI[5]) {
-        case 10:
-            *pLevel = OMX_VIDEO_H263Level10;
-            break;
-        case 20:
-            *pLevel = OMX_VIDEO_H263Level20;
-            break;
-        case 30:
-            *pLevel = OMX_VIDEO_H263Level30;
-            break;
-        case 40:
-            *pLevel = OMX_VIDEO_H263Level40;
-            break;
-        case 45:
-            *pLevel = OMX_VIDEO_H263Level45;
-            break;
-        case 50:
-            *pLevel = OMX_VIDEO_H263Level50;
-            break;
-        case 60:
-            *pLevel = OMX_VIDEO_H263Level60;
-            break;
-        case 70:
-            *pLevel = OMX_VIDEO_H263Level70;
-            break;
-        default:
-           *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-    }
-
-    /* get H263 profile */
-    switch (pDSI[6]) {
-        case 0:
-            *pProfile = OMX_VIDEO_H263ProfileBaseline;
-            break;
-        case 1:
-            *pProfile = OMX_VIDEO_H263ProfileH320Coding;
-            break;
-        case 2:
-            *pProfile = OMX_VIDEO_H263ProfileBackwardCompatible;
-            break;
-        case 3:
-            *pProfile = OMX_VIDEO_H263ProfileISWV2;
-            break;
-        case 4:
-            *pProfile = OMX_VIDEO_H263ProfileISWV3;
-            break;
-        case 5:
-            *pProfile = OMX_VIDEO_H263ProfileHighCompression;
-            break;
-        case 6:
-            *pProfile = OMX_VIDEO_H263ProfileInternet;
-            break;
-        case 7:
-            *pProfile = OMX_VIDEO_H263ProfileInterlace;
-            break;
-        case 8:
-            *pProfile = OMX_VIDEO_H263ProfileHighLatency;
-            break;
-        default:
-           *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-    }
-    ALOGV("getH263ProfileAndLevel profile %" PRId32 " level %" PRId32,
-          *pProfile, *pLevel);
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR getMPEG4ProfileAndLevel(M4OSA_UInt8 profileAndLevel,
-                      M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel) {
-
-    M4OSA_UInt32 i = 0;
-    M4OSA_UInt32 length = 0;
-    if ((pProfile == M4OSA_NULL) || (pLevel == M4OSA_NULL)) {
-        return M4ERR_PARAMETER;
-    }
-    ALOGV("getMPEG4ProfileAndLevel profileAndLevel %d", profileAndLevel);
-    length = sizeof(mpeg4ProfileLevelTable) /sizeof(mpeg4ProfileLevelTable[0]);
-    *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-    *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-    for (i = 0; i < length; i++) {
-        if (mpeg4ProfileLevelTable[i].code == profileAndLevel) {
-            *pProfile = mpeg4ProfileLevelTable[i].profile;
-            *pLevel = mpeg4ProfileLevelTable[i].level;
-            break;
-        }
-    }
-    ALOGV("getMPEG4ProfileAndLevel profile %" PRId32 " level %" PRId32,
-          *pProfile, *pLevel);
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/vss/src/M4VD_Tools.c b/libvideoeditor/vss/src/M4VD_Tools.c
deleted file mode 100755
index fdb4b41..0000000
--- a/libvideoeditor/vss/src/M4VD_Tools.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Debug.h"
-
-#include "M4VD_Tools.h"
-
-/**
- ************************************************************************
- * @file   M4VD_Tools.c
- * @brief
- * @note   This file implements helper functions for Bitstream parser
- ************************************************************************
- */
-
-M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
-     M4OSA_UInt32 nb_bits)
-{
-    M4OSA_UInt32    code;
-    M4OSA_UInt32    i;
-    code = 0;
-    for (i = 0; i < nb_bits; i++)
-    {
-        if (parsingCtxt->stream_index == 8)
-        {
-            //M4OSA_memcpy( (M4OSA_MemAddr8)&(parsingCtxt->stream_byte), parsingCtxt->in,
-            //     sizeof(unsigned char));
-            parsingCtxt->stream_byte = (unsigned char)(parsingCtxt->in)[0];
-            parsingCtxt->in++;
-            //fread(&stream_byte, sizeof(unsigned char),1,in);
-            parsingCtxt->stream_index = 0;
-        }
-        code = (code << 1);
-        code |= ((parsingCtxt->stream_byte & 0x80) >> 7);
-
-        parsingCtxt->stream_byte = (parsingCtxt->stream_byte << 1);
-        parsingCtxt->stream_index++;
-    }
-
-    return code;
-}
-
-M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
-                                     M4OSA_MemAddr32 dest_bits,
-                                     M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
-{
-    M4OSA_UInt8 i,j;
-    M4OSA_UInt32 temp_dest = 0, mask = 0, temp = 1;
-    M4OSA_UInt32 input = bitsToWrite;
-    input = (input << (32 - nb_bits - offset));
-
-    /* Put destination buffer to 0 */
-    for(j=0;j<3;j++)
-    {
-        for(i=0;i<8;i++)
-        {
-            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
-            {
-                mask |= (temp << ((7*(j+1))-i+j));
-            }
-        }
-    }
-    mask = ~mask;
-    *dest_bits &= mask;
-
-    /* Parse input bits, and fill output buffer */
-    for(j=0;j<3;j++)
-    {
-        for(i=0;i<8;i++)
-        {
-            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
-            {
-                temp = ((input & (0x80000000 >> offset)) >> (31-offset));
-                //*dest_bits |= (temp << (31 - i));
-                *dest_bits |= (temp << ((7*(j+1))-i+j));
-                input = (input << 1);
-            }
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-
-
diff --git a/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
deleted file mode 100755
index 86bb46b..0000000
--- a/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file     M4VIFI_RGB565toYUV420.c
- * @brief    Contain video library function
- * @note     Color Conversion Filter
- *           -# Contains the format conversion filters from RGB565 to YUV420
- ******************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-
-/**
- ******************************************************************************
- * M4VIFI_UInt8 M4VIFI_RGB565toYUV420 (void *pUserData,
- *                                     M4VIFI_ImagePlane *pPlaneIn,
- *                                   M4VIFI_ImagePlane *pPlaneOut)
- * @brief   transform RGB565 image to a YUV420 image.
- * @note    Convert RGB565 to YUV420,
- *          Loop on each row ( 2 rows by 2 rows )
- *              Loop on each column ( 2 col by 2 col )
- *                  Get 4 RGB samples from input data and build 4 output Y samples
- *                  and each single U & V data
- *              end loop on col
- *          end loop on row
- * @param   pUserData: (IN) User Specific Data
- * @param   pPlaneIn: (IN) Pointer to RGB565 Plane
- * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
- ******************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-                                                      M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
-    M4VIFI_UInt32   u32_stride_rgb, u32_stride_2rgb;
-    M4VIFI_UInt32   u32_col, u32_row;
-
-    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
-    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
-    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
-    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
-    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
-    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
-    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
-    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
-    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
-    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
-    M4VIFI_UInt8 count_null=0;
-
-    /* Check planes height are appropriate */
-    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
-        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
-        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    /* Check planes width are appropriate */
-    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
-        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
-        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Set the pointer to the beginning of the output data buffers */
-    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
-    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
-    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
-
-    /* Set the pointer to the beginning of the input data buffers */
-    pu8_rgbn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
-
-    /* Get the size of the output image */
-    u32_width = pPlaneOut[0].u_width;
-    u32_height = pPlaneOut[0].u_height;
-
-    /* Set the size of the memory jumps corresponding to row jump in each output plane */
-    u32_stride_Y = pPlaneOut[0].u_stride;
-    u32_stride2_Y = u32_stride_Y << 1;
-    u32_stride_U = pPlaneOut[1].u_stride;
-    u32_stride_V = pPlaneOut[2].u_stride;
-
-    /* Set the size of the memory jumps corresponding to row jump in input plane */
-    u32_stride_rgb = pPlaneIn->u_stride;
-    u32_stride_2rgb = u32_stride_rgb << 1;
-
-
-    /* Loop on each row of the output image, input coordinates are estimated from output ones */
-    /* Two YUV rows are computed at each pass */
-    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
-    {
-        /* Current Y plane row pointers */
-        pu8_yn = pu8_y_data;
-        /* Next Y plane row pointers */
-        pu8_ys = pu8_yn + u32_stride_Y;
-        /* Current U plane row pointer */
-        pu8_u = pu8_u_data;
-        /* Current V plane row pointer */
-        pu8_v = pu8_v_data;
-
-        pu8_rgbn = pu8_rgbn_data;
-
-        /* Loop on each column of the output image */
-        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* Get four RGB 565 samples from input data */
-            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
-            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
-            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
-            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
-
-            /* Unpack RGB565 to 8bit R, G, B */
-            /* (x,y) */
-            GET_RGB565(i32_b00,i32_g00,i32_r00,u16_pix1);
-            /* (x+1,y) */
-            GET_RGB565(i32_b10,i32_g10,i32_r10,u16_pix2);
-            /* (x,y+1) */
-            GET_RGB565(i32_b01,i32_g01,i32_r01,u16_pix3);
-            /* (x+1,y+1) */
-            GET_RGB565(i32_b11,i32_g11,i32_r11,u16_pix4);
-            /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
-            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
-            {
-                i32_b00 = 31;
-                i32_r00 = 31;
-            }
-            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
-            {
-                i32_b10 = 31;
-                i32_r10 = 31;
-            }
-            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
-            {
-                i32_b01 = 31;
-                i32_r01 = 31;
-            }
-            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
-            {
-                i32_b11 = 31;
-                i32_r11 = 31;
-            }
-            /* Convert RGB value to YUV */
-            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
-            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
-            /* luminance value */
-            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
-
-            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
-            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
-            /* luminance value */
-            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
-
-            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
-            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
-            /* luminance value */
-            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
-
-            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
-            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
-            /* luminance value */
-            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
-
-            /* Store luminance data */
-            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
-            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
-            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
-            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
-            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
-            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
-            /* Prepare for next column */
-            pu8_rgbn += (CST_RGB_16_SIZE<<1);
-            /* Update current Y plane line pointer*/
-            pu8_yn += 2;
-            /* Update next Y plane line pointer*/
-            pu8_ys += 2;
-            /* Update U plane line pointer*/
-            pu8_u ++;
-            /* Update V plane line pointer*/
-            pu8_v ++;
-        } /* End of horizontal scanning */
-
-        /* Prepare pointers for the next row */
-        pu8_y_data += u32_stride2_Y;
-        pu8_u_data += u32_stride_U;
-        pu8_v_data += u32_stride_V;
-        pu8_rgbn_data += u32_stride_2rgb;
-
-
-    } /* End of vertical scanning */
-
-    return M4VIFI_OK;
-}
-/* End of file M4VIFI_RGB565toYUV420.c */
-
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
deleted file mode 100755
index bf0bc06..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
+++ /dev/null
@@ -1,4139 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_AudioMixing.c
- * @brief    Video Studio Service 3GPP audio mixing implementation.
- * @note
- ******************************************************************************
- */
-
-/****************/
-/*** Includes ***/
-/****************/
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- * Our headers */
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_InternalTypes.h"
-#include "M4VSS3GPP_InternalFunctions.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-
-/* Put the definition of silence frames here */
-#define M4VSS3GPP_SILENCE_FRAMES
-#include "M4VSS3GPP_InternalConfig.h"
-
-/**
- * OSAL headers */
-#include "M4OSA_Memory.h" /**< OSAL memory management */
-#include "M4OSA_Debug.h"  /**< OSAL debug management */
-
-
-#include "VideoEditorResampler.h"
-/**
- ******************************************************************************
- * @brief    Static functions
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
-                             M4VSS3GPP_AudioMixingSettings *pSettings );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
-    M4VSS3GPP_InternalAudioMixingContext *pC );
-static M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
-                                                M4OSA_Int32 storeCount,
-                                                M4OSA_Int32 thresholdValue );
-/**
- *    Internal warning */
-#define M4VSS3GPP_WAR_END_OF_ADDED_AUDIO    M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
-
-/* A define used with SSRC 1.04 and above to avoid taking
-blocks smaller that the minimal block size */
-#define M4VSS_SSRC_MINBLOCKSIZE        600
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
- *                                     M4VSS3GPP_AudioMixingSettings* pSettings)
- * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
- * @note
- * @param    pContext        (OUT) Pointer on the VSS audio mixing context to allocate
- * @param    pSettings        (IN) Pointer to valid audio mixing settings
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return    M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-
-M4OSA_ERR M4VSS3GPP_audioMixingInit( M4VSS3GPP_AudioMixingContext *pContext,
-                                    M4VSS3GPP_AudioMixingSettings *pSettings,
-                                    M4OSA_FileReadPointer *pFileReadPtrFct,
-                                    M4OSA_FileWriterPointer *pFileWritePtrFct )
-{
-    M4VSS3GPP_InternalAudioMixingContext *pC;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_audioMixingInit called with pContext=0x%x, pSettings=0x%x",
-        pContext, pSettings);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_audioMixingInit: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
-        "M4VSS3GPP_audioMixingInit: pSettings is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
-        "M4VSS3GPP_audioMixingInit: pFileReadPtrFct is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
-        "M4VSS3GPP_audioMixingInit: pFileWritePtrFct is M4OSA_NULL");
-
-    if( pSettings->uiBeginLoop > pSettings->uiEndLoop )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_audioMixingInit: Begin loop time is higher than end loop time!");
-        return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
-    }
-
-    /**
-    * Allocate the VSS audio mixing context and return it to the user */
-    pC = (M4VSS3GPP_InternalAudioMixingContext
-        *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_InternalAudioMixingContext),
-        M4VSS3GPP,(M4OSA_Char *)"M4VSS3GPP_InternalAudioMixingContext");
-    *pContext = pC;
-
-    if( M4OSA_NULL == pC )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_audioMixingInit(): unable to allocate \
-            M4VSS3GPP_InternalAudioMixingContext,returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    /* Initialization of context Variables */
-    memset((void *)pC ,0,
-                 sizeof(M4VSS3GPP_InternalAudioMixingContext));
-    /**
-    * Copy this setting in context */
-    pC->iAddCts = pSettings->uiAddCts;
-    pC->bRemoveOriginal = pSettings->bRemoveOriginal;
-    pC->b_DuckingNeedeed = pSettings->b_DuckingNeedeed;
-    pC->InDucking_threshold = pSettings->InDucking_threshold;
-    pC->fBTVolLevel = pSettings->fBTVolLevel;
-    pC->fPTVolLevel = pSettings->fPTVolLevel;
-    pC->InDucking_lowVolume = pSettings->InDucking_lowVolume;
-    pC->bDoDucking = M4OSA_FALSE;
-    pC->bLoop = pSettings->bLoop;
-    pC->bNoLooping = M4OSA_FALSE;
-    pC->bjumpflag = M4OSA_TRUE;
-    /**
-    * Init some context variables */
-
-    pC->pInputClipCtxt = M4OSA_NULL;
-    pC->pAddedClipCtxt = M4OSA_NULL;
-    pC->fOrigFactor = 1.0F;
-    pC->fAddedFactor = 0.0F;
-    pC->bSupportSilence = M4OSA_FALSE;
-    pC->bHasAudio = M4OSA_FALSE;
-    pC->bAudioMixingIsNeeded = M4OSA_FALSE;
-
-    /* Init PC->ewc members */
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
-    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
-    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
-    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
-    pC->ewc.uiNbChannels = 1;
-    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
-    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
-    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
-    pC->ewc.pSilenceFrameData = M4OSA_NULL;
-    pC->ewc.pEncContext = M4OSA_NULL;
-    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
-    pC->ewc.p3gpWriterContext = M4OSA_NULL;
-    pC->pLVAudioResampler = M4OSA_NULL;
-    /**
-    * Set the OSAL filesystem function set */
-    pC->pOsaFileReadPtr = pFileReadPtrFct;
-    pC->pOsaFileWritPtr = pFileWritePtrFct;
-
-    /**
-    * Ssrc stuff */
-    pC->b_SSRCneeded = M4OSA_FALSE;
-    pC->pSsrcBufferIn = M4OSA_NULL;
-    pC->pSsrcBufferOut = M4OSA_NULL;
-    pC->pTempBuffer = M4OSA_NULL;
-    pC->pPosInTempBuffer = M4OSA_NULL;
-    pC->pPosInSsrcBufferIn = M4OSA_NULL;
-    pC->pPosInSsrcBufferOut = M4OSA_NULL;
-    pC->SsrcScratch = M4OSA_NULL;
-    pC->uiBeginLoop = pSettings->uiBeginLoop;
-    pC->uiEndLoop = pSettings->uiEndLoop;
-
-    /*
-    * Reset pointers for media and codecs interfaces */
-    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
-    M4ERR_CHECK_RETURN(err);
-
-    /*  Call the media and codecs subscription module */
-    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Open input clip, added clip and output clip and proceed with the settings */
-    err = M4VSS3GPP_intAudioMixingOpen(pC, pSettings);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Update main state automaton */
-    if( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream )
-        pC->State = M4VSS3GPP_kAudioMixingState_VIDEO;
-    else
-        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
-
-    pC->ewc.iOutputDuration = (M4OSA_Int32)pC->pInputClipCtxt->pSettings->
-        ClipProperties.uiClipDuration;
-    /*gInputParams.lvBTChannelCount*/
-    pC->pLVAudioResampler = LVAudioResamplerCreate(16,
-        pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels,
-        /* gInputParams.lvOutSampleRate*/(M4OSA_Int32)pSettings->outputASF, 1);
-     if( M4OSA_NULL == pC->pLVAudioResampler )
-     {
-         return M4ERR_ALLOC;
-     }
-        LVAudiosetSampleRate(pC->pLVAudioResampler,
-        /*gInputParams.lvInSampleRate*/
-        pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency);
-
-    LVAudiosetVolume(pC->pLVAudioResampler,
-                    (M4OSA_Int16)(0x1000 ),
-                    (M4OSA_Int16)(0x1000 ));
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingInit(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext)
- * @brief    Perform one step of audio mixing.
- * @note
- * @param     pContext          (IN) VSS audio mixing context
- * @return    M4NO_ERROR:       No error
- * @return    M4ERR_PARAMETER:  pContext is M4OSA_NULL (debug only)
- * @param     pProgress         (OUT) Progress percentage (0 to 100) of the finalization operation
- * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
- * @return    M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should now call
- *                                               M4VSS3GPP_audioMixingCleanUp()
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_audioMixingStep( M4VSS3GPP_AudioMixingContext pContext,
-                                    M4OSA_UInt8 *pProgress )
-{
-    M4OSA_ERR err;
-    M4VSS3GPP_InternalAudioMixingContext *pC =
-        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
-
-    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingStep called with pContext=0x%x",
-        pContext);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_audioMixingStep: pContext is M4OSA_NULL");
-
-    /**
-    * State automaton */
-    switch( pC->State )
-    {
-        case M4VSS3GPP_kAudioMixingState_VIDEO:
-            err = M4VSS3GPP_intAudioMixingStepVideo(pC);
-
-            /**
-            * Compute the progress percentage
-            * Note: audio and video CTS are not initialized before
-            * the call of M4VSS3GPP_intAudioMixingStepVideo */
-
-            /* P4ME00003276: First 0-50% segment is dedicated to state :
-               M4VSS3GPP_kAudioMixingState_VIDEO */
-            *pProgress = (M4OSA_UInt8)(50 * (pC->ewc.WriterVideoAU.CTS)
-                / pC->pInputClipCtxt->pVideoStream->
-                m_basicProperties.m_duration);
-
-            /**
-            * There may be no audio track (Remove audio track feature).
-            * In that case we double the current percentage */
-            if( M4SYS_kAudioUnknown == pC->ewc.WriterAudioStream.streamType )
-            {
-                ( *pProgress) <<= 1; /**< x2 */
-            }
-            else if( *pProgress >= 50 )
-            {
-                *pProgress =
-                    49; /**< Video processing is not greater than 50% */
-            }
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                if( pC->bHasAudio )
-                {
-                    /**
-                    * Video is over, state transition to audio and return OK */
-                    if( pC->iAddCts > 0 )
-                        pC->State =
-                        M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
-                    else
-                        pC->State =
-                        M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
-                }
-                else
-                {
-                    /**
-                    * No audio, state transition to FINISHED */
-                    pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
-                }
-
-                return M4NO_ERROR;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepVideo returns 0x%x!",
-                    err);
-                return err;
-            }
-            else
-            {
-                return M4NO_ERROR;
-            }
-            break;
-
-        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
-        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
-        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
-            if( pC->pAddedClipCtxt->iAudioFrameCts
-                != -pC->pAddedClipCtxt->iSilenceFrameDuration
-                && (pC->pAddedClipCtxt->iAudioFrameCts - 0.5)
-                / pC->pAddedClipCtxt->scale_audio > pC->uiEndLoop
-                && pC->uiEndLoop > 0 )
-            {
-            if(pC->bLoop == M4OSA_FALSE)
-            {
-                pC->bNoLooping = M4OSA_TRUE;
-            }
-            else
-            {
-                M4OSA_Int32 jumpCTS = (M4OSA_Int32)(pC->uiBeginLoop);
-
-                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
-                    pC->pAddedClipCtxt->pReaderContext,
-                    (M4_StreamHandler *)pC->pAddedClipCtxt->
-                    pAudioStream, &jumpCTS);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_audioMixingStep: error when jumping in added audio clip: 0x%x",
-                        err);
-                    return err;
-                }
-                /**
-                * Use offset to give a correct CTS ... */
-                pC->pAddedClipCtxt->iAoffset =
-                    (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-            }
-
-            }
-
-            if( M4OSA_FALSE == pC->bRemoveOriginal )
-            {
-                err = M4VSS3GPP_intAudioMixingStepAudioMix(pC);
-            }
-            else
-            {
-                err = M4VSS3GPP_intAudioMixingStepAudioReplace(pC);
-            }
-
-            /**
-            * Compute the progress percentage
-            * Note: audio and video CTS are not initialized before
-            * the call of M4VSS3GPP_intAudioMixingStepAudio */
-            if( 0 != pC->ewc.iOutputDuration )
-            {
-                /* P4ME00003276: Second 50-100% segment is dedicated to states :
-                M4VSS3GPP_kAudioMixingState_AUDIO... */
-                /* For Audio the progress computation is based on dAto and offset,
-                   it is more accurate */
-                *pProgress = (M4OSA_UInt8)(50
-                    + (50 * pC->ewc.dATo - pC->pInputClipCtxt->iVoffset)
-                    / (pC->ewc.iOutputDuration)); /**< 50 for 100/2 **/
-
-                if( *pProgress >= 100 )
-                {
-                    *pProgress =
-                        99; /**< It's not really finished, I prefer to return less than 100% */
-                }
-            }
-            else
-            {
-                *pProgress = 99;
-            }
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                /**
-                * Audio is over, state transition to FINISHED */
-                pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
-                return M4NO_ERROR;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepAudio returns 0x%x!",
-                    err);
-                return err;
-            }
-            else
-            {
-                return M4NO_ERROR;
-            }
-            break;
-
-        case M4VSS3GPP_kAudioMixingState_FINISHED:
-
-            /**
-            * Progress percentage: finalize finished -> 100% */
-            *pProgress = 100;
-
-            /**
-            * Audio mixing is finished, return correct warning */
-            return M4VSS3GPP_WAR_END_OF_AUDIO_MIXING;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_audioMixingStep: State error (0x%x)! Returning M4ERR_STATE",
-                pC->State);
-            return M4ERR_STATE;
-    }
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext)
- * @brief    Free all resources used by the VSS audio mixing operation.
- * @note    The context is no more valid after this call
- * @param    pContext            (IN) VSS audio mixing context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_audioMixingCleanUp( M4VSS3GPP_AudioMixingContext pContext )
-{
-    M4VSS3GPP_InternalAudioMixingContext *pC =
-        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
-    M4OSA_ERR err;
-    M4OSA_UInt32 lastCTS;
-
-    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingCleanUp called with pContext=0x%x",
-        pContext);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_audioMixingCleanUp: pContext is M4OSA_NULL");
-
-    /**
-    * Check input parameter */
-    if( M4OSA_NULL == pContext )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_audioMixingCleanUp(): M4VSS3GPP_audioMixingCleanUp: pContext is\
-             M4OSA_NULL, returning M4ERR_PARAMETER");
-        return M4ERR_PARAMETER;
-    }
-
-    /**
-    * Close Input 3GPP file */
-    if( M4OSA_NULL != pC->pInputClipCtxt )
-    {
-        M4VSS3GPP_intClipCleanUp(pC->pInputClipCtxt);
-        pC->pInputClipCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Close Added 3GPP file */
-    if( M4OSA_NULL != pC->pAddedClipCtxt )
-    {
-        M4VSS3GPP_intClipCleanUp(pC->pAddedClipCtxt);
-        pC->pAddedClipCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Close the 3GP writer. In normal use case it has already been closed,
-      but not in abort use case */
-    if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
-    {
-        /* Update last Video CTS */
-        lastCTS = pC->ewc.iOutputDuration;
-
-        err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
-            pC->ewc.p3gpWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_audioMixingCleanUp: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
-                err);
-        }
-
-        err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
-            pC->ewc.p3gpWriterContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_audioMixingCleanUp: pWriterGlobalFcts->pFctCloseWrite returns 0x%x!",
-                err);
-            /**< don't return the error because we have other things to free! */
-        }
-        pC->ewc.p3gpWriterContext = M4OSA_NULL;
-    }
-
-    /**
-    * Free the Audio encoder context */
-    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
-    {
-        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
-            pC->ewc.pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
-            pC->ewc.pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Free the ssrc stuff */
-
-    if( M4OSA_NULL != pC->SsrcScratch )
-    {
-        free(pC->SsrcScratch);
-        pC->SsrcScratch = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pSsrcBufferIn )
-    {
-        free(pC->pSsrcBufferIn);
-        pC->pSsrcBufferIn = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pSsrcBufferOut
-        && (M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0) )
-    {
-        free(pC->pSsrcBufferOut);
-        pC->pSsrcBufferOut = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->pTempBuffer )
-    {
-        free(pC->pTempBuffer);
-        pC->pTempBuffer = M4OSA_NULL;
-    }
-
-    if (pC->pLVAudioResampler != M4OSA_NULL)
-    {
-        LVDestroy(pC->pLVAudioResampler);
-        pC->pLVAudioResampler = M4OSA_NULL;
-    }
-
-    /**
-    * Free the shells interfaces */
-    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
-    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
-    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
-    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
-
-    /**
-    * Free the context */
-    free(pContext);
-    pContext = M4OSA_NULL;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingCleanUp(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/******************************************************************************/
-/******************************************************************************/
-/*********                  STATIC FUNCTIONS                         **********/
-/******************************************************************************/
-/******************************************************************************/
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingOpen()
- * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
- * @note
- * @param    pContext        (OUT) Pointer on the VSS audio mixing context to allocate
- * @param    pSettings        (IN) Pointer to valid audio mixing settings
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return    M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
-                             M4VSS3GPP_AudioMixingSettings *pSettings )
-{
-    M4OSA_ERR err;
-    M4OSA_UInt32 outputASF = 0;
-    M4ENCODER_Header *encHeader;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_intAudioMixingOpen called with pContext=0x%x, pSettings=0x%x",
-        pC, pSettings);
-
-    /**
-    * The Add Volume must be (strictly) superior than zero */
-    if( pSettings->uiAddVolume == 0 )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAudioMixingOpen(): AddVolume is zero,\
-            returning M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
-        return M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO;
-    }
-    /*
-    else if(pSettings->uiAddVolume >= 100) // If volume is set to 100, no more original audio ...
-    {
-    pC->bRemoveOriginal = M4OSA_TRUE;
-    }
-    */
-    /**
-    * Build the input clip settings */
-    pC->InputClipSettings.pFile =
-        pSettings->pOriginalClipFile; /**< Input 3GPP file descriptor */
-    pC->InputClipSettings.FileType = M4VIDEOEDITING_kFileType_3GPP;
-    pC->InputClipSettings.uiBeginCutTime =
-        0; /**< No notion of cut for the audio mixing feature */
-    pC->InputClipSettings.uiEndCutTime =
-        0; /**< No notion of cut for the audio mixing feature */
-
-    /**
-    * Open the original Audio/Video 3GPP clip */
-    err = M4VSS3GPP_intClipInit(&pC->pInputClipCtxt, pC->pOsaFileReadPtr);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(orig) returns 0x%x",
-            err);
-        return err;
-    }
-
-    err = M4VSS3GPP_intClipOpen(pC->pInputClipCtxt, &pC->InputClipSettings,
-        M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(orig) returns 0x%x",
-            err);
-        return err;
-    }
-
-    if( M4OSA_NULL == pC->pInputClipCtxt->pAudioStream )
-        {
-        pC->bRemoveOriginal = M4OSA_TRUE;
-        }
-    /**
-    * If there is no video, it's an error */
-    if( M4OSA_NULL == pC->pInputClipCtxt->pVideoStream )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAudioMixingOpen(): no video stream in clip,\
-            returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
-        return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
-    }
-
-    /**
-    * Compute clip properties */
-    err = M4VSS3GPP_intBuildAnalysis(pC->pInputClipCtxt,
-        &pC->pInputClipCtxt->pSettings->ClipProperties);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(orig) returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Build the added clip settings */
-    pC->AddedClipSettings.pFile =
-        pSettings->pAddedAudioTrackFile; /**< Added file descriptor */
-    pC->AddedClipSettings.FileType = pSettings->AddedAudioFileType;
-    pC->AddedClipSettings.uiBeginCutTime =
-        0; /**< No notion of cut for the audio mixing feature */
-    pC->AddedClipSettings.uiEndCutTime   = 0;/**< No notion of cut for the audio mixing feature */
-    pC->AddedClipSettings.ClipProperties.uiNbChannels=
-        pSettings->uiNumChannels;
-    pC->AddedClipSettings.ClipProperties.uiSamplingFrequency=    pSettings->uiSamplingFrequency;
-
-    if( M4OSA_NULL != pC->AddedClipSettings.pFile )
-    {
-        /**
-        * Open the added Audio clip */
-        err = M4VSS3GPP_intClipInit(&pC->pAddedClipCtxt, pC->pOsaFileReadPtr);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(added) returns 0x%x",
-                err);
-            return err;
-        }
-
-        err = M4VSS3GPP_intClipOpen(pC->pAddedClipCtxt, &pC->AddedClipSettings,
-            M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(added) returns 0x%x",
-                err);
-            return err;
-        }
-
-        /**
-        * If there is no audio, it's an error */
-        if( M4OSA_NULL == pC->pAddedClipCtxt->pAudioStream )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen(): no audio nor video stream in clip,\
-                returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
-            return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
-        }
-
-        /**
-        * Compute clip properties */
-        err = M4VSS3GPP_intBuildAnalysis(pC->pAddedClipCtxt,
-            &pC->pAddedClipCtxt->pSettings->ClipProperties);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(added) returns 0x%x",
-                err);
-            return err;
-        }
-
-        switch( pSettings->outputASF )
-        {
-            case M4VIDEOEDITING_k8000_ASF:
-                outputASF = 8000;
-                break;
-
-            case M4VIDEOEDITING_k16000_ASF:
-                outputASF = 16000;
-                break;
-
-            case M4VIDEOEDITING_k22050_ASF:
-                outputASF = 22050;
-                break;
-
-            case M4VIDEOEDITING_k24000_ASF:
-                outputASF = 24000;
-                break;
-
-            case M4VIDEOEDITING_k32000_ASF:
-                outputASF = 32000;
-                break;
-
-            case M4VIDEOEDITING_k44100_ASF:
-                outputASF = 44100;
-                break;
-
-            case M4VIDEOEDITING_k48000_ASF:
-                outputASF = 48000;
-                break;
-
-            default:
-                M4OSA_TRACE1_0("Bad parameter in output ASF ");
-                return M4ERR_PARAMETER;
-                break;
-        }
-
-        if( pC->bRemoveOriginal == M4OSA_TRUE
-            && (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-            == M4VIDEOEDITING_kMP3 || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM
-            || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.AudioStreamType
-            != pSettings->outputAudioFormat
-            || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiSamplingFrequency != outputASF
-            || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiNbChannels
-            != pSettings->outputNBChannels) )
-        {
-
-            if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
-            {
-                pSettings->outputASF = M4VIDEOEDITING_k8000_ASF;
-                pSettings->outputNBChannels = 1;
-                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize = 320;
-            }
-            else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
-            {
-                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize =
-                    2048 * pSettings->outputNBChannels;
-            }
-
-            pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency =
-                outputASF;
-
-            if( outputASF != pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiSamplingFrequency )
-            {
-                /* We need to call SSRC in order to align ASF and/or nb of channels */
-                /* Moreover, audio encoder may be needed in case of audio replacing... */
-                pC->b_SSRCneeded = M4OSA_TRUE;
-            }
-
-            if( pSettings->outputNBChannels
-                < pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
-            {
-                /* Stereo to Mono */
-                pC->ChannelConversion = 1;
-            }
-            else if( pSettings->outputNBChannels
-                > pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
-            {
-                /* Mono to Stereo */
-                pC->ChannelConversion = 2;
-            }
-
-            pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels =
-                pSettings->outputNBChannels;
-        }
-
-        /**
-        * Check compatibility chart */
-        err = M4VSS3GPP_intAudioMixingCompatibility(pC,
-            &pC->pInputClipCtxt->pSettings->ClipProperties,
-            &pC->pAddedClipCtxt->pSettings->ClipProperties);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingOpen():\
-                M4VSS3GPP_intAudioMixingCompatibility returns 0x%x",
-                err);
-            return err;
-        }
-
-        /**
-        * Check loop parameters */
-        if( pC->uiBeginLoop > pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiClipAudioDuration )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen():\
-                begin loop time is higher than added clip audio duration");
-            return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
-        }
-
-        /**
-        * Ok, let's go with this audio track */
-        pC->bHasAudio = M4OSA_TRUE;
-    }
-    else
-    {
-        /* No added file, force remove original */
-        pC->AddedClipSettings.FileType = M4VIDEOEDITING_kFileType_Unsupported;
-        pC->bRemoveOriginal = M4OSA_TRUE;
-        pC->bHasAudio = M4OSA_FALSE;
-    }
-
-    /**
-    * Copy the video properties of the input clip to the output properties */
-    pC->ewc.uiVideoBitrate =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
-    pC->ewc.uiVideoWidth =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoWidth;
-    pC->ewc.uiVideoHeight =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoHeight;
-    pC->ewc.uiVideoTimeScale =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoTimeScale;
-    pC->ewc.bVideoDataPartitioning =
-        pC->pInputClipCtxt->pSettings->ClipProperties.bMPEG4dataPartition;
-    pC->ewc.outputVideoProfile =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoProfile;
-    pC->ewc.outputVideoLevel =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoLevel;
-    switch( pC->pInputClipCtxt->pSettings->ClipProperties.VideoStreamType )
-    {
-        case M4VIDEOEDITING_kH263:
-            pC->ewc.VideoStreamType = M4SYS_kH263;
-            break;
-
-        case M4VIDEOEDITING_kMPEG4:
-            pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
-            break;
-
-        case M4VIDEOEDITING_kH264:
-            pC->ewc.VideoStreamType = M4SYS_kH264;
-            break;
-
-        default:
-            pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
-            break;
-    }
-
-    /* Add a link to video dsi */
-    if( M4SYS_kH264 == pC->ewc.VideoStreamType )
-    {
-
-        /* For H.264 encoder case
-        * Fetch the DSI from the shell video encoder, and feed it to the writer */
-
-        M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen: get DSI for H264 stream");
-
-        if( M4OSA_NULL == pC->ewc.pEncContext )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL");
-            err = M4VSS3GPP_intAudioMixingCreateVideoEncoder(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingOpen:\
-                    M4VSS3GPP_intAudioMixingCreateVideoEncoder returned error 0x%x",
-                    err);
-            }
-        }
-
-        if( M4OSA_NULL != pC->ewc.pEncContext )
-        {
-            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
-                pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
-                (M4OSA_DataOption) &encHeader);
-
-            if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingOpen: failed to get the encoder header (err 0x%x)",
-                    err);
-                M4OSA_TRACE1_2(
-                    "M4VSS3GPP_intAudioMixingOpen: encHeader->pBuf=0x%x, size=0x%x",
-                    encHeader->pBuf, encHeader->Size);
-            }
-            else
-            {
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intAudioMixingOpen: send DSI for H264 stream to 3GP writer");
-
-                /**
-                * Allocate and copy the new DSI */
-                pC->ewc.pVideoOutputDsi =
-                    (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(encHeader->Size, M4VSS3GPP,
-                    (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
-
-                if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
-                {
-                    M4OSA_TRACE1_0(
-                        "M4VSS3GPP_intAudioMixingOpen():\
-                        unable to allocate pVideoOutputDsi (H264), returning M4ERR_ALLOC");
-                    return M4ERR_ALLOC;
-                }
-                pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
-                memcpy((void *)pC->ewc.pVideoOutputDsi, (void *)encHeader->pBuf,
-                    encHeader->Size);
-            }
-
-            err = M4VSS3GPP_intAudioMixingDestroyVideoEncoder(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingOpen:\
-                    M4VSS3GPP_intAudioMixingDestroyVideoEncoder returned error 0x%x",
-                    err);
-            }
-        }
-        else
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL, cannot get the DSI");
-        }
-    }
-    else
-    {
-        M4OSA_TRACE3_1(
-            "M4VSS3GPP_intAudioMixingOpen: input clip video stream type = 0x%x",
-            pC->ewc.VideoStreamType);
-        pC->ewc.uiVideoOutputDsiSize =
-            (M4OSA_UInt16)pC->pInputClipCtxt->pVideoStream->
-            m_basicProperties.m_decoderSpecificInfoSize;
-        pC->ewc.pVideoOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pVideoStream->
-            m_basicProperties.m_pDecoderSpecificInfo;
-    }
-
-    /**
-    * Copy the audio properties of the added clip to the output properties */
-    if( pC->bHasAudio )
-    {
-        if( pC->bRemoveOriginal == M4OSA_TRUE )
-        {
-            pC->ewc.uiNbChannels =
-                pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
-            pC->ewc.uiAudioBitrate =
-                pC->pAddedClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
-            pC->ewc.uiSamplingFrequency = pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiSamplingFrequency;
-            pC->ewc.uiSilencePcmSize =
-                pC->pAddedClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
-            pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
-
-            /* if output settings are differents from added clip settings,
-            we need to reencode BGM */
-            if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-                != pSettings->outputAudioFormat
-                || pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiSamplingFrequency != outputASF
-                || pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiNbChannels
-                != pSettings->outputNBChannels
-                || pC->pAddedClipCtxt->pSettings->
-                ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
-            {
-                /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
-                if( pC->pAddedClipCtxt->pAudioStream->
-                    m_basicProperties.m_pDecoderSpecificInfo != M4OSA_NULL )
-                {
-
-                    /*
-                     free(pC->pAddedClipCtxt->pAudioStream->\
-                       m_basicProperties.m_pDecoderSpecificInfo);
-                       */
-                    pC->pAddedClipCtxt->pAudioStream->
-                        m_basicProperties.m_decoderSpecificInfoSize = 0;
-                    pC->pAddedClipCtxt->pAudioStream->
-                        m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
-                }
-
-                pC->ewc.uiNbChannels =
-                    pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
-                pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
-                    ClipProperties.uiSamplingFrequency;
-                pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
-
-                if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
-                {
-                    pC->ewc.AudioStreamType = M4SYS_kAMR;
-                    pC->ewc.pSilenceFrameData =
-                        (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
-                    pC->ewc.uiSilenceFrameSize =
-                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-                    pC->ewc.iSilenceFrameDuration =
-                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
-                    pC->ewc.uiAudioBitrate = 12200;
-                    pC->ewc.uiSamplingFrequency = 8000;
-                    pC->ewc.uiSilencePcmSize = 320;
-                    pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
-                }
-                else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
-                {
-                    pC->ewc.AudioStreamType = M4SYS_kAAC;
-
-                    if( pSettings->outputAudioBitrate
-                        == M4VIDEOEDITING_kUndefinedBitrate )
-                    {
-                        switch( pC->ewc.uiSamplingFrequency )
-                        {
-                            case 16000:
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k24_KBPS;
-                                break;
-
-                            case 22050:
-                            case 24000:
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k32_KBPS;
-                                break;
-
-                            case 32000:
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k48_KBPS;
-                                break;
-
-                            case 44100:
-                            case 48000:
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k64_KBPS;
-                                break;
-
-                            default:
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k64_KBPS;
-                                break;
-                        }
-
-                        if( pC->ewc.uiNbChannels == 2 )
-                        {
-                            /* Output bitrate have to be doubled */
-                            pC->ewc.uiAudioBitrate += pC->ewc.uiAudioBitrate;
-                        }
-                    }
-                    else
-                    {
-                        pC->ewc.uiAudioBitrate = pSettings->outputAudioBitrate;
-                    }
-
-                    if( pC->ewc.uiNbChannels == 1 )
-                    {
-                        pC->ewc.pSilenceFrameData =
-                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
-                        pC->ewc.uiSilenceFrameSize =
-                            M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
-                    }
-                    else
-                    {
-                        pC->ewc.pSilenceFrameData =
-                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
-                        pC->ewc.uiSilenceFrameSize =
-                            M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
-                    }
-                    pC->ewc.iSilenceFrameDuration =
-                        1024; /* AAC is always 1024/Freq sample duration */
-                }
-            }
-            else
-            {
-                switch( pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.AudioStreamType )
-                {
-                    case M4VIDEOEDITING_kAMR_NB:
-                        pC->ewc.AudioStreamType = M4SYS_kAMR;
-                        pC->ewc.pSilenceFrameData =
-                            (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
-                        pC->ewc.uiSilenceFrameSize =
-                            M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-                        pC->ewc.iSilenceFrameDuration =
-                            M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
-                        break;
-
-                    case M4VIDEOEDITING_kAAC:
-                    case M4VIDEOEDITING_kAACplus:
-                    case M4VIDEOEDITING_keAACplus:
-                        pC->ewc.AudioStreamType = M4SYS_kAAC;
-
-                        if( pC->ewc.uiNbChannels == 1 )
-                        {
-                            pC->ewc.pSilenceFrameData =
-                                (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
-                            pC->ewc.uiSilenceFrameSize =
-                                M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
-                        }
-                        else
-                        {
-                            pC->ewc.pSilenceFrameData =
-                                (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
-                            pC->ewc.uiSilenceFrameSize =
-                                M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
-                        }
-                        pC->ewc.iSilenceFrameDuration =
-                            1024; /* AAC is always 1024/Freq sample duration */
-                        break;
-
-                    case M4VIDEOEDITING_kEVRC:
-                        pC->ewc.AudioStreamType = M4SYS_kEVRC;
-                        pC->ewc.pSilenceFrameData = M4OSA_NULL;
-                        pC->ewc.uiSilenceFrameSize = 0;
-                        pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
-                                            (makes it easier to factorize amr and evrc code) */
-                        break;
-
-                    case M4VIDEOEDITING_kPCM:
-                        /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
-                        pC->pAddedClipCtxt->pAudioStream->
-                            m_basicProperties.m_decoderSpecificInfoSize = 0;
-                        pC->pAddedClipCtxt->pAudioStream->
-                            m_basicProperties.m_pDecoderSpecificInfo =
-                            M4OSA_NULL;
-
-                        if( pC->pAddedClipCtxt->pSettings->
-                            ClipProperties.uiSamplingFrequency == 8000 )
-                        {
-                            pC->ewc.AudioStreamType = M4SYS_kAMR;
-                            pC->ewc.pSilenceFrameData = (M4OSA_UInt8
-                                *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
-                            pC->ewc.uiSilenceFrameSize =
-                                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-                            pC->ewc.iSilenceFrameDuration =
-                                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
-                            pC->ewc.uiAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
-                        }
-                        else if( pC->pAddedClipCtxt->pSettings->
-                            ClipProperties.uiSamplingFrequency == 16000 )
-                        {
-                            if( pC->ewc.uiNbChannels == 1 )
-                            {
-                                pC->ewc.AudioStreamType = M4SYS_kAAC;
-                                pC->ewc.pSilenceFrameData = (M4OSA_UInt8
-                                    *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
-                                pC->ewc.uiSilenceFrameSize =
-                                    M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
-                                pC->ewc.iSilenceFrameDuration =
-                                    1024; /* AAC is always 1024/Freq sample duration */
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k32_KBPS;
-                            }
-                            else
-                            {
-                                pC->ewc.AudioStreamType = M4SYS_kAAC;
-                                pC->ewc.pSilenceFrameData = (M4OSA_UInt8
-                                    *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
-                                pC->ewc.uiSilenceFrameSize =
-                                    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
-                                pC->ewc.iSilenceFrameDuration =
-                                    1024; /* AAC is always 1024/Freq sample duration */
-                                pC->ewc.uiAudioBitrate =
-                                    M4VIDEOEDITING_k64_KBPS;
-                            }
-                        }
-                        else
-                        {
-                            pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
-                        }
-                        break;
-
-                    default:
-                        pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
-                        break;
-                }
-            }
-
-            /* Add a link to audio dsi */
-            pC->ewc.uiAudioOutputDsiSize =
-                (M4OSA_UInt16)pC->pAddedClipCtxt->pAudioStream->
-                m_basicProperties.m_decoderSpecificInfoSize;
-            pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pAddedClipCtxt->pAudioStream->
-                m_basicProperties.m_pDecoderSpecificInfo;
-        }
-        else
-        {
-            pC->ewc.uiNbChannels =
-                pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
-            pC->ewc.uiAudioBitrate =
-                pC->pInputClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
-            pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
-                ClipProperties.uiSamplingFrequency;
-            pC->ewc.uiSilencePcmSize =
-                pC->pInputClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
-            pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
-
-            switch( pC->pInputClipCtxt->pSettings->
-                ClipProperties.AudioStreamType )
-            {
-                case M4VIDEOEDITING_kAMR_NB:
-                    pC->ewc.AudioStreamType = M4SYS_kAMR;
-                    pC->ewc.pSilenceFrameData =
-                        (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
-                    pC->ewc.uiSilenceFrameSize =
-                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-                    pC->ewc.iSilenceFrameDuration =
-                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
-                    break;
-
-                case M4VIDEOEDITING_kAAC:
-                case M4VIDEOEDITING_kAACplus:
-                case M4VIDEOEDITING_keAACplus:
-                    pC->ewc.AudioStreamType = M4SYS_kAAC;
-
-                    if( pC->ewc.uiNbChannels == 1 )
-                    {
-                        pC->ewc.pSilenceFrameData =
-                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
-                        pC->ewc.uiSilenceFrameSize =
-                            M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
-                    }
-                    else
-                    {
-                        pC->ewc.pSilenceFrameData =
-                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
-                        pC->ewc.uiSilenceFrameSize =
-                            M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
-                    }
-                    pC->ewc.iSilenceFrameDuration =
-                        1024; /* AAC is always 1024/Freq sample duration */
-                    break;
-
-                default:
-                    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
-                    M4OSA_TRACE1_0(
-                        "M4VSS3GPP_intAudioMixingOpen: No audio track in input file.");
-                    return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
-                    break;
-            }
-
-            /* Add a link to audio dsi */
-            pC->ewc.uiAudioOutputDsiSize =
-                (M4OSA_UInt16)pC->pInputClipCtxt->pAudioStream->
-                m_basicProperties.m_decoderSpecificInfoSize;
-            pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pAudioStream->
-                m_basicProperties.m_pDecoderSpecificInfo;
-        }
-    }
-
-    /**
-    * Copy common 'silence frame stuff' to ClipContext */
-    pC->pInputClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
-    pC->pInputClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
-    pC->pInputClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
-    pC->pInputClipCtxt->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
-    pC->pInputClipCtxt->scale_audio = pC->ewc.scale_audio;
-
-    pC->pInputClipCtxt->iAudioFrameCts =
-        -pC->pInputClipCtxt->iSilenceFrameDuration; /* Reset time */
-
-    /**
-    * Copy common 'silence frame stuff' to ClipContext */
-    if( pC->bHasAudio )
-    {
-        pC->pAddedClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
-        pC->pAddedClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
-        pC->pAddedClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
-        pC->pAddedClipCtxt->iSilenceFrameDuration =
-            pC->ewc.iSilenceFrameDuration;
-        pC->pAddedClipCtxt->scale_audio = pC->ewc.scale_audio;
-
-        pC->pAddedClipCtxt->iAudioFrameCts =
-            -pC->pAddedClipCtxt->iSilenceFrameDuration; /* Reset time */
-    }
-
-    /**
-    * Check AddCts is lower than original clip duration */
-    if( ( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream)
-        && (pC->iAddCts > (M4OSA_Int32)pC->pInputClipCtxt->pVideoStream->
-        m_basicProperties.m_duration) )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAudioMixingOpen(): uiAddCts is larger than video duration,\
-            returning M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
-        return M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION;
-    }
-
-    /**
-    * If the audio tracks are not compatible, replace input track by silence */
-    if( M4OSA_FALSE == pC->pInputClipCtxt->pSettings->
-        ClipProperties.bAudioIsCompatibleWithMasterClip )
-    {
-        M4VSS3GPP_intClipDeleteAudioTrack(pC->pInputClipCtxt);
-    }
-
-    /**
-    * Check if audio mixing is required */
-    if( ( ( pC->bHasAudio) && (M4OSA_FALSE
-        == pC->pAddedClipCtxt->pSettings->ClipProperties.bAudioIsEditable))
-        || (M4OSA_TRUE == pC->bRemoveOriginal) ) /*||
-                                                 (pSettings->uiAddVolume >= 100)) */
-    {
-        pC->bAudioMixingIsNeeded = M4OSA_FALSE;
-    }
-    else
-    {
-        pC->bAudioMixingIsNeeded = M4OSA_TRUE;
-    }
-
-    /**
-    * Check if output audio can support silence frames
-    Trick i use bAudioIsCompatibleWithMasterClip filed to store that  */
-    if( pC->bHasAudio )
-    {
-        pC->bSupportSilence = pC->pAddedClipCtxt->pSettings->
-            ClipProperties.bAudioIsCompatibleWithMasterClip;
-
-        if( M4OSA_FALSE == pC->bSupportSilence )
-        {
-            if( pC->iAddCts > 0 )
-            {
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intAudioMixingOpen():\
-                    iAddCts should be set to 0 with this audio track !");
-                return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
-            }
-
-            if( 0 < pC->uiEndLoop )
-            {
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intAudioMixingOpen():\
-                    uiEndLoop should be set to 0 with this audio track !");
-                return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
-            }
-        }
-    }
-    if( pC->b_DuckingNeedeed == M4OSA_FALSE)
-    {
-        /**
-        * Compute the factor to apply to sample to do the mixing */
-        pC->fAddedFactor = 0.50F;
-        pC->fOrigFactor = 0.50F;
-    }
-
-
-    /**
-    * Check if SSRC is needed */
-    if( M4OSA_TRUE == pC->b_SSRCneeded )
-    {
-        M4OSA_UInt32 numerator, denominator, ratio, ratioBuffer;
-
-        /**
-        * Init the SSRC module */
-        SSRC_ReturnStatus_en
-            ReturnStatus; /* Function return status                       */
-        LVM_INT16 NrSamplesMin =
-            0; /* Minimal number of samples on the input or on the output */
-        LVM_INT32
-            ScratchSize; /* The size of the scratch memory               */
-        LVM_INT16
-            *pInputInScratch; /* Pointer to input in the scratch buffer       */
-        LVM_INT16
-            *
-            pOutputInScratch; /* Pointer to the output in the scratch buffer  */
-        SSRC_Params_t ssrcParams;          /* Memory for init parameters                    */
-
-        switch( pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiSamplingFrequency )
-        {
-            case 8000:
-                ssrcParams.SSRC_Fs_In = LVM_FS_8000;
-                break;
-
-            case 11025:
-                ssrcParams.SSRC_Fs_In = LVM_FS_11025;
-                break;
-
-            case 12000:
-                ssrcParams.SSRC_Fs_In = LVM_FS_12000;
-                break;
-
-            case 16000:
-                ssrcParams.SSRC_Fs_In = LVM_FS_16000;
-                break;
-
-            case 22050:
-                ssrcParams.SSRC_Fs_In = LVM_FS_22050;
-                break;
-
-            case 24000:
-                ssrcParams.SSRC_Fs_In = LVM_FS_24000;
-                break;
-
-            case 32000:
-                ssrcParams.SSRC_Fs_In = LVM_FS_32000;
-                break;
-
-            case 44100:
-                ssrcParams.SSRC_Fs_In = LVM_FS_44100;
-                break;
-
-            case 48000:
-                ssrcParams.SSRC_Fs_In = LVM_FS_48000;
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingOpen: invalid added clip sampling frequency (%d Hz),\
-                    returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM",
-                    pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.uiSamplingFrequency);
-                return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
-        }
-
-        if( 1 == pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
-        {
-            ssrcParams.SSRC_NrOfChannels = LVM_MONO;
-        }
-        else
-        {
-            ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
-        }
-
-        switch( pC->ewc.uiSamplingFrequency )
-        {
-            case 8000:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
-                break;
-
-            case 16000:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
-                break;
-
-            case 22050:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
-                break;
-
-            case 24000:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
-                break;
-
-            case 32000:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
-                break;
-
-            case 44100:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
-                break;
-
-            case 48000:
-                ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingOpen: invalid output sampling frequency (%d Hz),\
-                    returning M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED",
-                    pC->ewc.uiSamplingFrequency);
-                return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
-                break;
-        }
-        ReturnStatus = 0;
-
-        switch (ssrcParams.SSRC_Fs_In){
-        case LVM_FS_8000:
-            ssrcParams.NrSamplesIn = 320;
-            break;
-        case LVM_FS_11025:
-            ssrcParams.NrSamplesIn =441;
-            break;
-        case LVM_FS_12000:
-            ssrcParams.NrSamplesIn =    480;
-            break;
-        case LVM_FS_16000:
-            ssrcParams.NrSamplesIn =    640;
-            break;
-        case LVM_FS_22050:
-            ssrcParams.NrSamplesIn =    882;
-            break;
-        case LVM_FS_24000:
-            ssrcParams.NrSamplesIn =    960;
-            break;
-        case LVM_FS_32000:
-            ssrcParams.NrSamplesIn = 1280;
-            break;
-        case LVM_FS_44100:
-            ssrcParams.NrSamplesIn = 1764;
-            break;
-        case LVM_FS_48000:
-            ssrcParams.NrSamplesIn = 1920;
-            break;
-        default:
-            ReturnStatus = -1;
-            break;
-        }
-
-        switch (ssrcParams.SSRC_Fs_Out){
-        case LVM_FS_8000:
-            ssrcParams.NrSamplesOut= 320;
-            break;
-        case LVM_FS_11025:
-            ssrcParams.NrSamplesOut =441;
-            break;
-        case LVM_FS_12000:
-            ssrcParams.NrSamplesOut=    480;
-            break;
-        case LVM_FS_16000:
-            ssrcParams.NrSamplesOut=    640;
-            break;
-        case LVM_FS_22050:
-            ssrcParams.NrSamplesOut=    882;
-            break;
-        case LVM_FS_24000:
-            ssrcParams.NrSamplesOut=    960;
-            break;
-        case LVM_FS_32000:
-            ssrcParams.NrSamplesOut = 1280;
-            break;
-        case LVM_FS_44100:
-            ssrcParams.NrSamplesOut= 1764;
-            break;
-        case LVM_FS_48000:
-            ssrcParams.NrSamplesOut = 1920;
-            break;
-        default:
-            ReturnStatus = -1;
-            break;
-        }
-        if( ReturnStatus != SSRC_OK )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingOpen:\
-                Error code %d returned by the SSRC_GetNrSamples function",
-                ReturnStatus);
-            return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
-        }
-
-        NrSamplesMin =
-            (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
-            ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
-
-        while( NrSamplesMin < M4VSS_SSRC_MINBLOCKSIZE )
-        { /* Don't take blocks smaller that the minimal block size */
-            ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
-            ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
-            NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
-        }
-        pC->iSsrcNbSamplIn = (LVM_INT16)(
-            ssrcParams.
-            NrSamplesIn); /* multiplication by NrOfChannels is done below */
-        pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
-
-        numerator =
-            pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
-            * pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
-        denominator =
-            pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
-            * pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
-
-        if( numerator % denominator == 0 )
-        {
-            ratioBuffer = (M4OSA_UInt32)(numerator / denominator);
-        }
-        else
-        {
-            ratioBuffer = (M4OSA_UInt32)(numerator / denominator) + 1;
-        }
-
-        ratio =
-            (M4OSA_UInt32)(( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
-            * ratioBuffer) / (pC->iSsrcNbSamplIn * sizeof(short)
-            * pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiNbChannels));
-
-        if( ratio == 0 )
-        {
-            /* It means that the input size of SSRC bufferIn is bigger than the asked buffer */
-            pC->minimumBufferIn = pC->iSsrcNbSamplIn * sizeof(short)
-                * pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiNbChannels;
-        }
-        else
-        {
-            ratio++; /* We use the immediate superior integer */
-            pC->minimumBufferIn = ratio * (pC->iSsrcNbSamplIn * sizeof(short)
-                * pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiNbChannels);
-        }
-
-        /**
-        * Allocate buffer for the input of the SSRC */
-        pC->pSsrcBufferIn =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->minimumBufferIn
-            + pC->pAddedClipCtxt->
-            AudioDecBufferOut.
-            m_bufferSize,
-            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
-
-        if( M4OSA_NULL == pC->pSsrcBufferIn )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen():\
-                unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
-
-        /**
-        * Allocate buffer for the output of the SSRC */
-        /* The "3" value below should be optimized ... one day ... */
-        pC->pSsrcBufferOut =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(3 * pC->iSsrcNbSamplOut * sizeof(short)
-            * pC->ewc.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
-
-        if( M4OSA_NULL == pC->pSsrcBufferOut )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen():\
-                unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
-
-        /**
-        * Allocate temporary buffer needed in case of channel conversion */
-        if( pC->ChannelConversion > 0 )
-        {
-            /* The "3" value below should be optimized ... one day ... */
-            pC->pTempBuffer =
-                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(3 * pC->iSsrcNbSamplOut
-                * sizeof(short) * pC->pAddedClipCtxt->pSettings->
-                ClipProperties.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
-
-            if( M4OSA_NULL == pC->pTempBuffer )
-            {
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intAudioMixingOpen():\
-                    unable to allocate pTempBuffer, returning M4ERR_ALLOC");
-                return M4ERR_ALLOC;
-            }
-            pC->pPosInTempBuffer = pC->pTempBuffer;
-        }
-    }
-    else if( pC->ChannelConversion > 0 )
-    {
-        pC->minimumBufferIn =
-            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
-
-        /**
-        * Allocate buffer for the input of the SSRC */
-        pC->pSsrcBufferIn =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->minimumBufferIn
-            + pC->pAddedClipCtxt->
-            AudioDecBufferOut.
-            m_bufferSize,
-            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
-
-        if( M4OSA_NULL == pC->pSsrcBufferIn )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen(): \
-                unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
-
-        /**
-        * Allocate buffer for the output of the SSRC */
-        /* The "3" value below should be optimized ... one day ... */
-        pC->pSsrcBufferOut = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(
-            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize,
-            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
-
-        if( M4OSA_NULL == pC->pSsrcBufferOut )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen():\
-                unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
-    }
-    else if( (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3)||
-         (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM))
-    {
-        M4OSA_UInt32 minbuffer = 0;
-
-        if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
-        {
-            pC->minimumBufferIn = 2048 * pC->ewc.uiNbChannels;
-            minbuffer = pC->minimumBufferIn;
-        }
-        else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
-        {
-            pC->minimumBufferIn = 320;
-
-            if( pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize > 320 )
-            {
-                minbuffer = pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
-            }
-            else
-            {
-                minbuffer = pC->minimumBufferIn; /* Not really possible ...*/
-            }
-        }
-        else
-        {
-            M4OSA_TRACE1_0("Bad output audio format, in case of MP3 replacing");
-            return M4ERR_PARAMETER;
-        }
-
-        /**
-        * Allocate buffer for the input of the SSRC */
-        pC->pSsrcBufferIn =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(2 * minbuffer, M4VSS3GPP,
-            (M4OSA_Char *)"pSsrcBufferIn");
-
-        if( M4OSA_NULL == pC->pSsrcBufferIn )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingOpen(): unable to allocate pSsrcBufferIn,\
-                returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
-
-        pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
-        pC->pSsrcBufferOut = pC->pSsrcBufferIn;
-    }
-
-    /**
-    * Check if audio encoder is needed to do audio mixing or audio resampling */
-    if( M4OSA_TRUE == pC->bAudioMixingIsNeeded || M4VIDEOEDITING_kPCM
-        == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-        || M4VIDEOEDITING_kMP3
-        == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-        != pSettings->outputAudioFormat
-        || pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
-        != outputASF
-        || pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels
-        != pSettings->outputNBChannels )
-    {
-        /**
-        * Init the audio encoder */
-        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
-            pC->ewc.uiAudioBitrate);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreateAudioEncoder() returns 0x%x",
-                err);
-            return err;
-        }
-
-        /* In case of PCM, MP3 or audio replace with reencoding, use encoder DSI */
-        if( pC->ewc.uiAudioOutputDsiSize == 0 && (M4VIDEOEDITING_kPCM
-            == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-            || M4VIDEOEDITING_kMP3 == pC->pAddedClipCtxt->pSettings->
-            ClipProperties.AudioStreamType
-            || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.AudioStreamType
-            != pSettings->outputAudioFormat
-            || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiSamplingFrequency != outputASF
-            || pC->pAddedClipCtxt->pSettings->
-            ClipProperties.uiNbChannels
-            != pSettings->outputNBChannels) )
-        {
-            pC->ewc.uiAudioOutputDsiSize =
-                (M4OSA_UInt16)pC->ewc.pAudioEncDSI.infoSize;
-            pC->ewc.pAudioOutputDsi = pC->ewc.pAudioEncDSI.pInfo;
-        }
-    }
-
-    /**
-    * Init the output 3GPP file */
-    /*11/12/2008 CR3283 add the max output file size for the MMS use case in VideoArtist*/
-    err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
-        pC->pOsaFileWritPtr, pSettings->pOutputClipFile,
-        pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreate3GPPOutputFile() returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence()
- * @brief    Write an audio silence frame into the writer
- * @note    Mainly used when padding with silence
- * @param    pC            (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-
-    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingWriteSilence:\
-         pWriterDataFcts->pStartAU(audio) returns 0x%x!", err);
-        return err;
-    }
-
-    M4OSA_TRACE2_0("A #### silence AU");
-
-    memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
-        (void *)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
-
-    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
-    pC->ewc.WriterAudioAU.CTS =
-        (M4OSA_Time)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-    M4OSA_TRACE2_2("B ---- write : cts  = %ld [ 0x%x ]",
-        (M4OSA_Int32)(pC->ewc.dATo), pC->ewc.WriterAudioAU.size);
-
-    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingWriteSilence:\
-            pWriterDataFcts->pProcessAU(silence) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Perform one step of video.
- * @note
- * @param    pC            (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_UInt16 offset;
-
-    M4OSA_TRACE2_3("  VIDEO step : dVTo = %f  state = %d  offset = %ld",
-        pC->ewc.dOutputVidCts, pC->State, pC->pInputClipCtxt->iVoffset);
-
-    /**
-    * Read the input video AU */
-    err = pC->pInputClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-        pC->pInputClipCtxt->pReaderContext,
-        (M4_StreamHandler *)pC->pInputClipCtxt->pVideoStream,
-        &pC->pInputClipCtxt->VideoAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE3_1(
-            "M4VSS3GPP_intAudioMixingStepVideo(): m_pFctGetNextAu(video) returns 0x%x",
-            err);
-        return err;
-    }
-
-    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
-        pC->pInputClipCtxt->VideoAU.m_CTS, pC->pInputClipCtxt->iVoffset,
-        pC->pInputClipCtxt->VideoAU.m_size);
-
-    /**
-    * Get the output AU to write into */
-    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    offset = 0;
-    /* for h.264 stream do not read the 1st 4 bytes as they are header indicators */
-    if( pC->pInputClipCtxt->pVideoStream->m_basicProperties.m_streamType
-        == M4DA_StreamTypeVideoMpeg4Avc )
-    {
-        M4OSA_TRACE3_0(
-            "M4VSS3GPP_intAudioMixingStepVideo(): input stream type H264");
-        offset = 4;
-    }
-    pC->pInputClipCtxt->VideoAU.m_size  -=  offset;
-    /**
-    * Check that the video AU is not larger than expected */
-    if( pC->pInputClipCtxt->VideoAU.m_size > pC->ewc.uiVideoMaxAuSize )
-    {
-        M4OSA_TRACE1_2(
-            "M4VSS3GPP_intAudioMixingStepVideo: AU size greater than MaxAuSize (%d>%d)!\
-            returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
-            pC->pInputClipCtxt->VideoAU.m_size, pC->ewc.uiVideoMaxAuSize);
-        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
-    }
-
-    /**
-    * Copy the input AU payload to the output AU */
-    memcpy((void *)pC->ewc.WriterVideoAU.dataAddress,
-        (void *)(pC->pInputClipCtxt->VideoAU.m_dataAddress + offset),
-        (pC->pInputClipCtxt->VideoAU.m_size));
-
-    /**
-    * Copy the input AU parameters to the output AU */
-    pC->ewc.WriterVideoAU.size = pC->pInputClipCtxt->VideoAU.m_size;
-    pC->ewc.WriterVideoAU.CTS =
-        (M4OSA_UInt32)(pC->pInputClipCtxt->VideoAU.m_CTS + 0.5);
-    pC->ewc.WriterVideoAU.attribute = pC->pInputClipCtxt->VideoAU.m_attribute;
-
-    /**
-    * Write the AU */
-    M4OSA_TRACE2_2("D ---- write : cts  = %lu [ 0x%x ]",
-        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
-
-    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingStepVideo(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Perform one step of audio.
- * @note
- * @param    pC            (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_3("  AUDIO mix  : dATo = %f  state = %d  offset = %ld",
-        pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
-
-    switch( pC->State )
-    {
-        /**********************************************************/
-        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
-            {
-                err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix:\
-                        M4VSS3GPP_intAudioMixingCopyOrig(1) returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Check if we reached the AddCts */
-                if( pC->ewc.dATo >= pC->iAddCts )
-                {
-                    /**
-                    * First segment is over, state transition to second and return OK */
-                    pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
-
-                    /* Transition from reading state to encoding state */
-                    err = M4VSS3GPP_intAudioMixingTransition(pC);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intAudioMixingStepAudioMix(): pre-encode fails err = 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Return with no error so the step function will be called again */
-                    pC->pAddedClipCtxt->iAoffset =
-                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-                    M4OSA_TRACE2_0(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR (1->2)");
-
-                    return M4NO_ERROR;
-                }
-            }
-            break;
-
-            /**********************************************************/
-        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
-            {
-                if( M4OSA_TRUE == pC->bAudioMixingIsNeeded ) /**< Mix */
-                {
-                    /**
-                    * Read the added audio AU */
-                    if( pC->ChannelConversion > 0 || pC->b_SSRCneeded == M4OSA_TRUE
-                        || pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
-                    {
-                        /* In case of sampling freq conversion and/or channel conversion,
-                           the read next AU will be    called by the
-                           M4VSS3GPP_intAudioMixingDoMixing function */
-                    }
-                    else
-                    {
-                        err =
-                            M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
-
-                        M4OSA_TRACE2_3("E .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                            pC->pAddedClipCtxt->iAudioFrameCts
-                            / pC->pAddedClipCtxt->scale_audio,
-                            pC->pAddedClipCtxt->iAoffset
-                            / pC->pAddedClipCtxt->scale_audio,
-                            pC->pAddedClipCtxt->uiAudioFrameSize);
-
-                        if( M4WAR_NO_MORE_AU == err )
-                        {
-                            /**
-                            * Decide what to do when audio is over */
-                            if( pC->uiEndLoop > 0 )
-                            {
-                                /**
-                                * Jump at the Begin loop time */
-                                M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
-
-                                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->
-                                    m_pFctJump(
-                                    pC->pAddedClipCtxt->pReaderContext,
-                                    (M4_StreamHandler
-                                    *)pC->pAddedClipCtxt->pAudioStream,
-                                    &time);
-
-                                if( M4NO_ERROR != err )
-                                {
-                                    M4OSA_TRACE1_1(
-                                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                        m_pReader->m_pFctJump(audio returns 0x%x",
-                                        err);
-                                    return err;
-                                }
-                            }
-                            else
-                            {
-                                /* Transition from encoding state to reading state */
-                                err = M4VSS3GPP_intAudioMixingTransition(pC);
-
-                                if( M4NO_ERROR != err )
-                                {
-                                    M4OSA_TRACE1_1(
-                                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                        pre-encode fails err = 0x%x",
-                                        err);
-                                    return err;
-                                }
-
-                                /**
-                                * Second segment is over, state transition to third and
-                                 return OK */
-                                pC->State =
-                                    M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
-
-                                /**
-                                * Return with no error so the step function will be
-                                 called again */
-                                M4OSA_TRACE2_0(
-                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                    returning M4NO_ERROR (2->3) a");
-                                return M4NO_ERROR;
-                            }
-                        }
-                        else if( M4NO_ERROR != err )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                m_pFctGetNextAu(audio) returns 0x%x",
-                                err);
-                            return err;
-                        }
-                    }
-
-                    /**
-                    * Read the original audio AU */
-                    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
-
-                    M4OSA_TRACE2_3("F .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                        pC->pInputClipCtxt->iAudioFrameCts
-                        / pC->pInputClipCtxt->scale_audio,
-                        pC->pInputClipCtxt->iAoffset
-                        / pC->pInputClipCtxt->scale_audio,
-                        pC->pInputClipCtxt->uiAudioFrameSize);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE3_1(
-                            "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                            m_pFctGetNextAu(audio) returns 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    if( pC->ChannelConversion == 0
-                        && pC->b_SSRCneeded == M4OSA_FALSE
-                        && pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.AudioStreamType != M4VIDEOEDITING_kMP3 )
-                    {
-                        /**
-                        * Get the output AU to write into */
-                        err = pC->ShellAPI.pWriterDataFcts->pStartAU(
-                            pC->ewc.p3gpWriterContext,
-                            M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                            &pC->ewc.WriterAudioAU);
-
-                        if( M4NO_ERROR != err )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intAudioMixingStepAudioMix:\
-                                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
-                                err);
-                            return err;
-                        }
-                    }
-
-                    /**
-                    * Perform the audio mixing */
-                    err = M4VSS3GPP_intAudioMixingDoMixing(pC);
-
-                    if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
-                    {
-                        return M4NO_ERROR;
-                    }
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intAudioMixingStepAudioMix:\
-                            M4VSS3GPP_intAudioMixingDoMixing returns 0x%x!",
-                            err);
-                        return err;
-                    }
-                }
-                else /**< No mix, just copy added audio */
-                {
-                    err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
-
-                    if( M4WAR_NO_MORE_AU == err )
-                    {
-                        /**
-                        * Decide what to do when audio is over */
-                        if( pC->uiEndLoop > 0 )
-                        {
-                            /**
-                            * Jump at the Begin loop time */
-                            M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
-
-                            err =
-                                pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
-                                pC->pAddedClipCtxt->pReaderContext,
-                                (M4_StreamHandler
-                                *)pC->pAddedClipCtxt->pAudioStream,
-                                &time);
-
-                            if( M4NO_ERROR != err )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                    m_pReader->m_pFctJump(audio returns 0x%x",
-                                    err);
-                                return err;
-                            }
-
-                            /**
-                            * 'BZZZ' bug fix:
-                            * add a silence frame */
-                            err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
-
-                            if( M4NO_ERROR != err )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                    M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
-                                    err);
-                                return err;
-                            }
-
-                            /**
-                            * Return with no error so the step function will be called again to
-                              read audio data */
-                            pC->pAddedClipCtxt->iAoffset =
-                                (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio
-                                + 0.5);
-
-                            M4OSA_TRACE2_0(
-                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                    returning M4NO_ERROR (loop)");
-                            return M4NO_ERROR;
-                        }
-                        else
-                        {
-                            /* Transition to begin cut */
-                            err = M4VSS3GPP_intAudioMixingTransition(pC);
-
-                            if( M4NO_ERROR != err )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                    pre-encode fails err = 0x%x",
-                                    err);
-                                return err;
-                            }
-
-                            /**
-                            * Second segment is over, state transition to third */
-                            pC->State =
-                                M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
-
-                            /**
-                            * Return with no error so the step function will be called again */
-                            M4OSA_TRACE2_0(
-                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                returning M4NO_ERROR (2->3) b");
-                            return M4NO_ERROR;
-                        }
-                    }
-                    else if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                            M4VSS3GPP_intAudioMixingCopyOrig(2) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Check if we reached the end of the video */
-                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
-                {
-                    M4OSA_TRACE3_0(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix(): Video duration reached,\
-                        returning M4WAR_NO_MORE_AU");
-                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
-                }
-            }
-            break;
-
-            /**********************************************************/
-        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
-            {
-                err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix:\
-                        M4VSS3GPP_intAudioMixingCopyOrig(3) returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Check if we reached the end of the video */
-                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
-                {
-                    M4OSA_TRACE3_0(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                        Video duration reached, returning M4WAR_NO_MORE_AU");
-                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
-                }
-            }
-            break;
-       default:
-            break;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Perform one step of audio.
- * @note
- * @param    pC            (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-
-    M4OSA_TRACE2_3("  AUDIO repl : dATo = %f  state = %d  offset = %ld",
-        pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
-
-    switch( pC->State )
-    {
-        /**********************************************************/
-        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
-            {
-                /**
-                * Replace the SID (silence) payload in the writer AU */
-                err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                        M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Check if we reached the AddCts */
-                if( pC->ewc.dATo >= pC->iAddCts )
-                {
-                    /**
-                    * First segment is over, state transition to second and return OK */
-                    pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
-
-                    /**
-                    * Return with no error so the step function will be called again */
-                    pC->pAddedClipCtxt->iAoffset =
-                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-                    M4OSA_TRACE2_0("M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                         returning M4NO_ERROR (1->2)");
-                    return M4NO_ERROR;
-                }
-            }
-            break;
-
-            /**********************************************************/
-        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
-            {
-                err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
-
-                if( M4WAR_NO_MORE_AU == err )
-                {
-                    /**
-                    * Decide what to do when audio is over */
-
-                    if( pC->uiEndLoop > 0 )
-                    {
-                        /**
-                        * Jump at the Begin loop time */
-                        M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
-
-                        err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
-                            pC->pAddedClipCtxt->pReaderContext,
-                            (M4_StreamHandler
-                            *)pC->pAddedClipCtxt->pAudioStream, &time);
-
-                        if( M4NO_ERROR != err )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                                m_pReader->m_pFctJump(audio returns 0x%x",
-                                err);
-                            return err;
-                        }
-
-                        /**
-                        * 'BZZZ' bug fix:
-                        * add a silence frame */
-                        err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
-
-                        if( M4NO_ERROR != err )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                                M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
-                                err);
-                            return err;
-                        }
-
-                        /**
-                        * Return with no error so the step function will be called again to
-                          read audio data */
-                        pC->pAddedClipCtxt->iAoffset =
-                            (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-                        M4OSA_TRACE2_0(
-                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                            returning M4NO_ERROR (loop)");
-
-                        return M4NO_ERROR;
-                    }
-                    else if( M4OSA_TRUE == pC->bSupportSilence )
-                    {
-                        /**
-                        * Second segment is over, state transition to third and return OK */
-                        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
-
-                        /**
-                        * Return with no error so the step function will be called again */
-                        M4OSA_TRACE2_0(
-                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                                 returning M4NO_ERROR (2->3)");
-                        return M4NO_ERROR;
-                    }
-                    else
-                    {
-                        /**
-                        * The third segment (silence) is only done if supported.
-                        * In other case, we finish here. */
-                        pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
-
-                        /**
-                        * Return with no error so the step function will be called again */
-                        M4OSA_TRACE2_0(
-                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                                 returning M4NO_ERROR (2->F)");
-                        return M4NO_ERROR;
-                    }
-                }
-                else if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                        M4VSS3GPP_intAudioMixingCopyOrig(2) returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Check if we reached the end of the clip */
-                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
-                {
-                    M4OSA_TRACE3_0(
-                        "M4VSS3GPP_intAudioMixingStepAudioReplace(): Clip duration reached,\
-                        returning M4WAR_NO_MORE_AU");
-                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
-                }
-            }
-            break;
-
-            /**********************************************************/
-        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
-            {
-                /**
-                * Replace the SID (silence) payload in the writer AU */
-                err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
-                        M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Check if we reached the end of the video */
-                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
-                {
-                    M4OSA_TRACE3_0(
-                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
-                        Video duration reached, returning M4WAR_NO_MORE_AU");
-                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
-                }
-            }
-            break;
-        default:
-            break;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intAudioMixingStepAudioReplace(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Read one AU from the original audio file and write it to the output
- * @note
- * @param    pC    (IN) VSS audio mixing internal context
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-
-    /**
-    * Read the input original audio AU */
-    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
-
-    M4OSA_TRACE2_3("G .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-        pC->pInputClipCtxt->iAudioFrameCts / pC->pInputClipCtxt->scale_audio,
-        pC->pInputClipCtxt->iAoffset / pC->pInputClipCtxt->scale_audio,
-        pC->pInputClipCtxt->uiAudioFrameSize);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE3_1(
-            "M4VSS3GPP_intAudioMixingCopyOrig(): m_pFctGetNextAu(audio) returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Get the output AU to write into */
-    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pStartAU(audio) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Copy the input AU properties to the output AU */
-    pC->ewc.WriterAudioAU.size = pC->pInputClipCtxt->uiAudioFrameSize;
-    pC->ewc.WriterAudioAU.CTS =
-        pC->pInputClipCtxt->iAudioFrameCts + pC->pInputClipCtxt->iAoffset;
-
-    /**
-    * Copy the AU itself */
-    memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
-        (void *)pC->pInputClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
-
-    /**
-    * Write the mixed AU */
-    M4OSA_TRACE2_2("H ---- write : cts  = %ld [ 0x%x ]",
-        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-        pC->ewc.WriterAudioAU.size);
-
-    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Increment the audio CTS for the next step */
-    pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyOrig(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Read one AU from the added audio file and write it to the output
- * @note
- * @param    pC    (IN) VSS audio mixing internal context
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-
-    if(pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 ||
-        pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM ||
-        pC->b_SSRCneeded == M4OSA_TRUE ||
-        pC->ChannelConversion > 0)
-    {
-        M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
-        M4ENCODER_AudioBuffer
-            pEncOutBuffer; /**< Encoder output buffer for api */
-        M4OSA_Time
-            frameTimeDelta; /**< Duration of the encoded (then written) data */
-        M4OSA_MemAddr8 tempPosBuffer;
-
-        err = M4VSS3GPP_intAudioMixingConvert(pC);
-
-        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
-        {
-            M4OSA_TRACE2_0(
-                "M4VSS3GPP_intAudioMixingCopyAdded:\
-                M4VSS3GPP_intAudioMixingConvert end of added file");
-            return M4NO_ERROR;
-        }
-        else if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingCopyAdded:\
-                M4VSS3GPP_intAudioMixingConvert returned 0x%x", err);
-            return err;
-        }
-
-        /**
-        * Get the output AU to write into */
-        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingStepAudioMix:\
-                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /* [Mono] or [Stereo interleaved] : all is in one buffer */
-        pEncInBuffer.pTableBuffer[0] = pC->pSsrcBufferOut;
-        pEncInBuffer.pTableBufferSize[0] =
-            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-        pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-        pEncInBuffer.pTableBufferSize[1] = 0;
-
-        /* Time in ms from data size, because it is PCM16 samples */
-        frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
-            / pC->ewc.uiNbChannels;
-
-        /**
-        * Prepare output buffer */
-        pEncOutBuffer.pTableBuffer[0] =
-            (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-        pEncOutBuffer.pTableBufferSize[0] = 0;
-
-        M4OSA_TRACE2_0("K **** blend AUs");
-        /**
-        * Encode the PCM audio */
-
-        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
-            pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingDoMixing():\
-                pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                err);
-            return err;
-        }
-
-        /**
-        * Set AU cts and size */
-        pC->ewc.WriterAudioAU.size =
-            pEncOutBuffer.
-            pTableBufferSize[0]; /**< Get the size of encoded data */
-        pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
-
-        /* Update decoded buffer here */
-        if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
-        {
-            tempPosBuffer = pC->pSsrcBufferOut
-                + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-            memmove((void *)pC->pSsrcBufferOut, (void *)tempPosBuffer,
-                pC->pPosInSsrcBufferOut - tempPosBuffer);
-            pC->pPosInSsrcBufferOut -=
-                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-        }
-        else
-        {
-            tempPosBuffer = pC->pSsrcBufferIn + pC->minimumBufferIn;
-            memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
-                pC->pPosInSsrcBufferIn - tempPosBuffer);
-            pC->pPosInSsrcBufferIn -= pC->minimumBufferIn;
-        }
-
-        /**
-        * Write the mixed AU */
-        M4OSA_TRACE2_2("J ---- write : cts  = %ld [ 0x%x ]",
-            (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-            pC->ewc.WriterAudioAU.size);
-
-        err =
-            pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingCopyAdded:\
-                pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Increment the audio CTS for the next step */
-        pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
-    }
-    else
-    {
-        /**
-        * Read the added audio AU */
-        err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
-
-        M4OSA_TRACE2_3("I .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-            pC->pAddedClipCtxt->iAudioFrameCts
-            / pC->pAddedClipCtxt->scale_audio,
-            pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
-            pC->pAddedClipCtxt->uiAudioFrameSize);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE3_1(
-                "M4VSS3GPP_intAudioMixingCopyAdded(): m_pFctGetNextAu(audio) returns 0x%x",
-                err);
-            return err;
-        }
-
-        /**
-        * Get the output AU to write into */
-        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingCopyAdded:\
-                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Copy the input AU properties to the output AU */
-
-        /** THE CHECK BELOW IS ADDED TO PREVENT ISSUES LINKED TO PRE-ALLOCATED MAX AU SIZE
-        max AU size is set based on M4VSS3GPP_AUDIO_MAX_AU_SIZE defined in file
-        M4VSS3GPP_InternalConfig.h, If this error occurs increase the limit set in this file
-        */
-        if( pC->pAddedClipCtxt->uiAudioFrameSize > pC->ewc.WriterAudioAU.size )
-        {
-            M4OSA_TRACE1_2(
-                "ERROR: audio AU size (%d) to copy larger than allocated one (%d) => abort",
-                pC->pAddedClipCtxt->uiAudioFrameSize,
-                pC->ewc.WriterAudioAU.size);
-            M4OSA_TRACE1_0(
-                "PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY");
-            err = M4ERR_UNSUPPORTED_MEDIA_TYPE;
-            return err;
-        }
-        pC->ewc.WriterAudioAU.size = pC->pAddedClipCtxt->uiAudioFrameSize;
-        pC->ewc.WriterAudioAU.CTS =
-            pC->pAddedClipCtxt->iAudioFrameCts + pC->pAddedClipCtxt->iAoffset;
-
-        /**
-        * Copy the AU itself */
-        memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
-            (void *)pC->pAddedClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
-
-        /**
-        * Write the mixed AU */
-        M4OSA_TRACE2_2("J ---- write : cts  = %ld [ 0x%x ]",
-            (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-            pC->ewc.WriterAudioAU.size);
-
-        err =
-            pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingCopyAdded:\
-                pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Increment the audio CTS for the next step */
-        pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyAdded(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR  M4VSS3GPP_intAudioMixingConvert(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Convert PCM of added track to the right ASF / nb of Channels
- * @note
- * @param    pC    (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-    int ssrcErr; /**< Error while ssrc processing */
-    M4OSA_UInt32 uiChannelConvertorNbSamples =
-        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short)
-        / pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
-    M4OSA_MemAddr8 tempPosBuffer;
-
-    M4OSA_UInt32 outFrameCount = uiChannelConvertorNbSamples;
-    /* Do we need to feed SSRC buffer In ? */
-    /**
-    * RC: This is not really optimum (memmove). We should handle this with linked list. */
-    while( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn < (M4OSA_Int32)pC->minimumBufferIn )
-    {
-        /* We need to get more PCM data */
-        if (pC->bNoLooping == M4OSA_TRUE)
-        {
-            err = M4WAR_NO_MORE_AU;
-        }
-        else
-        {
-        err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
-        }
-        if(pC->bjumpflag)
-        {
-        /**
-            * Jump at the Begin loop time */
-            M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
-
-            err =
-                pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump\
-                    (pC->pAddedClipCtxt->pReaderContext,
-                     (M4_StreamHandler*)pC->pAddedClipCtxt->pAudioStream, &time);
-            if (M4NO_ERROR != err)
-            {
-                M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingConvert():\
-                     m_pReader->m_pFctJump(audio returns 0x%x", err);
-                return err;
-            }
-            pC->bjumpflag = M4OSA_FALSE;
-        }
-        M4OSA_TRACE2_3("E .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-             pC->pAddedClipCtxt->iAudioFrameCts / pC->pAddedClipCtxt->scale_audio,
-                 pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
-                     pC->pAddedClipCtxt->uiAudioFrameSize);
-        if( M4WAR_NO_MORE_AU == err )
-        {
-            if(pC->bNoLooping == M4OSA_TRUE)
-            {
-                pC->uiEndLoop =0; /* Value 0 means no looping is required */
-            }
-            /**
-            * Decide what to do when audio is over */
-            if( pC->uiEndLoop > 0 )
-            {
-                /**
-                * Jump at the Begin loop time */
-                M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
-
-                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
-                    pC->pAddedClipCtxt->pReaderContext,
-                    (M4_StreamHandler *)pC->pAddedClipCtxt->
-                    pAudioStream, &time);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingConvert():\
-                        m_pReader->m_pFctJump(audio returns 0x%x",
-                        err);
-                    return err;
-                }
-            }
-            else
-            {
-                /* Transition from encoding state to reading state */
-                err = M4VSS3GPP_intAudioMixingTransition(pC);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingStepAudioMix(): pre-encode fails err = 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Second segment is over, state transition to third and return OK */
-                pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
-
-                /**
-                * Return with no error so the step function will be called again */
-                M4OSA_TRACE2_0(
-                    "M4VSS3GPP_intAudioMixingConvert():\
-                    returning M4VSS3GPP_WAR_END_OF_ADDED_AUDIO (2->3) a");
-                return M4VSS3GPP_WAR_END_OF_ADDED_AUDIO;
-            }
-        }
-        else if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingConvert(): m_pFctGetNextAu(audio) returns 0x%x",
-                err);
-            return err;
-        }
-
-        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingDoMixing:\
-                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
-                err);
-            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
-        }
-
-        /* Copy decoded data into SSRC buffer in */
-        memcpy((void *)pC->pPosInSsrcBufferIn,
-            (void *)pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress,
-            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize);
-        /* Update position pointer into SSRC buffer In */
-
-        pC->pPosInSsrcBufferIn +=
-            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
-    }
-
-    /* Do the resampling / channel conversion if needed (=feed buffer out) */
-    if( pC->b_SSRCneeded == M4OSA_TRUE )
-    {
-        pC->ChannelConversion = 0;
-        if( pC->ChannelConversion > 0 )
-        {
-            while( pC->pPosInTempBuffer - pC->pTempBuffer
-                < (M4OSA_Int32)(pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
-                *pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels)
-                / pC->ChannelConversion )
-                /* We use ChannelConversion variable because in case 2, we need twice less data */
-            {
-                ssrcErr = 0;
-                memset((void *)pC->pPosInTempBuffer,0,
-                    (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels));
-
-                LVAudioresample_LowQuality((short*)pC->pPosInTempBuffer,
-                    (short*)pC->pSsrcBufferIn,
-                    pC->iSsrcNbSamplOut,
-                    pC->pLVAudioResampler);
-                if( 0 != ssrcErr )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
-                        ssrcErr);
-                    return ssrcErr;
-                }
-
-                pC->pPosInTempBuffer += pC->iSsrcNbSamplOut * sizeof(short)
-                    * pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.uiNbChannels;
-
-                /* Update SSRC bufferIn */
-                tempPosBuffer =
-                    pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
-                    * pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.uiNbChannels);
-                memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
-                    pC->pPosInSsrcBufferIn - tempPosBuffer);
-                pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
-                    * pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.uiNbChannels;
-            }
-        }
-        else
-        {
-            while( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
-                < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
-            {
-                ssrcErr = 0;
-                memset((void *)pC->pPosInSsrcBufferOut,0,
-                    (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels));
-
-                LVAudioresample_LowQuality((short*)pC->pPosInSsrcBufferOut,
-                    (short*)pC->pSsrcBufferIn,
-                    pC->iSsrcNbSamplOut,
-                    pC->pLVAudioResampler);
-                if( 0 != ssrcErr )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
-                        ssrcErr);
-                    return ssrcErr;
-                }
-                pC->pPosInSsrcBufferOut +=
-                    pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels;
-
-                /* Update SSRC bufferIn */
-                tempPosBuffer =
-                    pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
-                    * pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.uiNbChannels);
-                memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
-                    pC->pPosInSsrcBufferIn - tempPosBuffer);
-                pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
-                    * pC->pAddedClipCtxt->pSettings->
-                    ClipProperties.uiNbChannels;
-            }
-        }
-
-        /* Convert Stereo<->Mono */
-        switch( pC->ChannelConversion )
-        {
-            case 0: /* No channel conversion */
-                break;
-
-            case 1: /* stereo to mono */
-                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
-                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
-                {
-                    From2iToMono_16((short *)pC->pTempBuffer,
-                        (short *)pC->pSsrcBufferOut,
-                        (short)(uiChannelConvertorNbSamples));
-                    /* Update pTempBuffer */
-                    tempPosBuffer = pC->pTempBuffer
-                        + (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.
-                        uiNbChannels); /* Buffer is in bytes */
-                    memmove((void *)pC->pTempBuffer, (void *)tempPosBuffer,
-                        pC->pPosInTempBuffer - tempPosBuffer);
-                    pC->pPosInTempBuffer -=
-                        (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.uiNbChannels);
-                    pC->pPosInSsrcBufferOut +=
-                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-                }
-                break;
-
-            case 2: /* mono to stereo */
-                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
-                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
-                {
-                    MonoTo2I_16((short *)pC->pTempBuffer,
-                        (short *)pC->pSsrcBufferOut,
-                        (short)uiChannelConvertorNbSamples);
-                    tempPosBuffer = pC->pTempBuffer
-                        + (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.uiNbChannels);
-                    memmove((void *)pC->pTempBuffer, (void *)tempPosBuffer,
-                        pC->pPosInTempBuffer - tempPosBuffer);
-                    pC->pPosInTempBuffer -=
-                        (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.uiNbChannels);
-                    pC->pPosInSsrcBufferOut +=
-                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-                }
-                break;
-        }
-    }
-    else if( pC->ChannelConversion > 0 )
-    {
-        //M4OSA_UInt32 uiChannelConvertorNbSamples =
-        // pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short) /
-        // pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
-        /* Convert Stereo<->Mono */
-        switch( pC->ChannelConversion )
-        {
-            case 0: /* No channel conversion */
-                break;
-
-            case 1: /* stereo to mono */
-                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
-                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
-                {
-                    From2iToMono_16((short *)pC->pSsrcBufferIn,
-                        (short *)pC->pSsrcBufferOut,
-                        (short)(uiChannelConvertorNbSamples));
-                    /* Update pTempBuffer */
-                    tempPosBuffer = pC->pSsrcBufferIn
-                        + (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.
-                        uiNbChannels); /* Buffer is in bytes */
-                    memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
-                        pC->pPosInSsrcBufferIn - tempPosBuffer);
-                    pC->pPosInSsrcBufferIn -=
-                        (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.uiNbChannels);
-                    pC->pPosInSsrcBufferOut +=
-                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-                }
-                break;
-
-            case 2: /* mono to stereo */
-                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
-                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
-                {
-                    MonoTo2I_16((short *)pC->pSsrcBufferIn,
-                        (short *)pC->pSsrcBufferOut,
-                        (short)uiChannelConvertorNbSamples);
-                    tempPosBuffer = pC->pSsrcBufferIn
-                        + (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.uiNbChannels);
-                    memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
-                        pC->pPosInSsrcBufferIn - tempPosBuffer);
-                    pC->pPosInSsrcBufferIn -=
-                        (uiChannelConvertorNbSamples * sizeof(short)
-                        * pC->pAddedClipCtxt->pSettings->
-                        ClipProperties.uiNbChannels);
-                    pC->pPosInSsrcBufferOut +=
-                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-                }
-                break;
-        }
-    }
-    else
-    {
-        /* No channel conversion nor sampl. freq. conversion needed, just buffer management */
-        pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_Int32 M4VSS3GPP_getDecibelSound( M4OSA_UInt32 value )
-    {
-    int dbSound = 1;
-
-    if( value == 0 )
-        return 0;
-
-    if( value > 0x4000 && value <= 0x8000 )      // 32768
-        dbSound = 90;
-
-    else if( value > 0x2000 && value <= 0x4000 ) // 16384
-        dbSound = 84;
-
-    else if( value > 0x1000 && value <= 0x2000 ) // 8192
-        dbSound = 78;
-
-    else if( value > 0x0800 && value <= 0x1000 ) // 4028
-        dbSound = 72;
-
-    else if( value > 0x0400 && value <= 0x0800 ) // 2048
-        dbSound = 66;
-
-    else if( value > 0x0200 && value <= 0x0400 ) // 1024
-        dbSound = 60;
-
-    else if( value > 0x0100 && value <= 0x0200 ) // 512
-        dbSound = 54;
-
-    else if( value > 0x0080 && value <= 0x0100 ) // 256
-        dbSound = 48;
-
-    else if( value > 0x0040 && value <= 0x0080 ) // 128
-        dbSound = 42;
-
-    else if( value > 0x0020 && value <= 0x0040 ) // 64
-        dbSound = 36;
-
-    else if( value > 0x0010 && value <= 0x0020 ) // 32
-        dbSound = 30;
-
-    else if( value > 0x0008 && value <= 0x0010 ) //16
-        dbSound = 24;
-
-    else if( value > 0x0007 && value <= 0x0008 ) //8
-        dbSound = 24;
-
-    else if( value > 0x0003 && value <= 0x0007 ) // 4
-        dbSound = 18;
-
-    else if( value > 0x0001 && value <= 0x0003 ) //2
-        dbSound = 12;
-
-    else if( value > 0x000 && value <= 0x0001 )  // 1
-        dbSound = 6;
-
-    else
-        dbSound = 0;
-
-    return dbSound;
-    }
-/**
- ******************************************************************************
- * M4OSA_ERR  M4VSS3GPP_intAudioMixingDoMixing(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Mix the current audio AUs (decoder, mix, encode)
- * @note
- * @param    pC    (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_Int16 *pPCMdata1;
-    M4OSA_Int16 *pPCMdata2;
-    M4OSA_UInt32 uiPCMsize;
-
-    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
-    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
-    M4OSA_Time
-        frameTimeDelta; /**< Duration of the encoded (then written) data */
-    M4OSA_MemAddr8 tempPosBuffer;
-    /* ducking variable */
-    M4OSA_UInt16 loopIndex = 0;
-    M4OSA_Int16 *pPCM16Sample = M4OSA_NULL;
-    M4OSA_Int32 peakDbValue = 0;
-    M4OSA_Int32 previousDbValue = 0;
-    M4OSA_UInt32 i;
-
-    /**
-    * Decode original audio track AU */
-
-    err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pInputClipCtxt);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingDoMixing:\
-            M4VSS3GPP_intClipDecodeCurrentAudioFrame(orig) returns 0x%x",
-            err);
-        return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
-    }
-
-    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0
-        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-        == M4VIDEOEDITING_kMP3 )
-    {
-        err = M4VSS3GPP_intAudioMixingConvert(pC);
-
-        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
-        {
-            return err;
-        }
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingDoMixing: M4VSS3GPP_intAudioMixingConvert returned 0x%x",
-                err);
-            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
-        }
-
-        /**
-        * Get the output AU to write into */
-        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingStepAudioMix:\
-                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        pPCMdata2 = (M4OSA_Int16 *)pC->pSsrcBufferOut;
-    }
-    else
-    {
-        /**
-        * Decode added audio track AU */
-        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingDoMixing:\
-                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
-                err);
-            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
-        }
-
-        /**
-        * Check both clips decoded the same amount of PCM samples */
-        if( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
-            != pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingDoMixing:\
-                both clips AU must have the same decoded PCM size!");
-            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
-        }
-        pPCMdata2 = (M4OSA_Int16 *)pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress;
-    }
-
-    /**
-    * Mix the two decoded PCM audios */
-    pPCMdata1 =
-        (M4OSA_Int16 *)pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
-    uiPCMsize = pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
-        / 2; /*buffer size (bytes) to number of sample (int16)*/
-
-    if( pC->b_DuckingNeedeed )
-    {
-        loopIndex = 0;
-        peakDbValue = 0;
-        previousDbValue = peakDbValue;
-
-        pPCM16Sample = (M4OSA_Int16 *)pC->pInputClipCtxt->
-            AudioDecBufferOut.m_dataAddress;
-
-        //Calculate the peak value
-         while( loopIndex
-             < pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
-            / sizeof(M4OSA_Int16) )
-        {
-            if( pPCM16Sample[loopIndex] >= 0 )
-            {
-                peakDbValue = previousDbValue > pPCM16Sample[loopIndex]
-                ? previousDbValue : pPCM16Sample[loopIndex];
-                previousDbValue = peakDbValue;
-            }
-            else
-            {
-                peakDbValue = previousDbValue > -pPCM16Sample[loopIndex]
-                ? previousDbValue : -pPCM16Sample[loopIndex];
-                previousDbValue = peakDbValue;
-            }
-            loopIndex++;
-        }
-
-        pC->audioVolumeArray[pC->audVolArrIndex] =
-            M4VSS3GPP_getDecibelSound(peakDbValue);
-
-        /* WINDOW_SIZE is 10 by default and check for threshold is done after 10 cycles */
-        if( pC->audVolArrIndex >= WINDOW_SIZE - 1 )
-        {
-            pC->bDoDucking =
-                M4VSS3GPP_isThresholdBreached((M4OSA_Int32 *)&(pC->audioVolumeArray),
-                pC->audVolArrIndex, pC->InDucking_threshold);
-
-            pC->audVolArrIndex = 0;
-        }
-        else
-        {
-            pC->audVolArrIndex++;
-        }
-
-        /*
-        *Below logic controls the mixing weightage for Background Track and Primary Track
-        *for the duration of window under analysis to give fade-out for Background and fade-in
-        *for primary
-        *
-        *Current fading factor is distributed in equal range over the defined window size.
-        *
-        *For a window size = 25 (500 ms (window under analysis) / 20 ms (sample duration))
-        *
-        */
-
-        if( pC->bDoDucking )
-        {
-            if( pC->duckingFactor
-                > pC->InDucking_lowVolume ) // FADE OUT BG Track
-            {
-                    // decrement ducking factor in total steps in factor of low volume steps to reach
-                    // low volume level
-                pC->duckingFactor -= (pC->InDucking_lowVolume);
-            }
-            else
-            {
-                pC->duckingFactor = pC->InDucking_lowVolume;
-            }
-        }
-        else
-        {
-            if( pC->duckingFactor < 1.0 ) // FADE IN BG Track
-            {
-                // increment ducking factor in total steps of low volume factor to reach
-                // orig.volume level
-                pC->duckingFactor += (pC->InDucking_lowVolume);
-            }
-        else
-           {
-                pC->duckingFactor = 1.0;
-            }
-        }
-        /* endif - ducking_enable */
-
-        /* Mixing Logic */
-
-        while( uiPCMsize-- > 0 )
-        {
-            M4OSA_Int32 temp;
-
-           /* set vol factor for BT and PT */
-            *pPCMdata2 = (M4OSA_Int16)(*pPCMdata2 * pC->fBTVolLevel);
-
-            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fPTVolLevel);
-
-            /* mix the two samples */
-
-            *pPCMdata2 = (M4OSA_Int16)(( *pPCMdata2) * (pC->duckingFactor));
-            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata2 / 2 + *pPCMdata1 / 2);
-
-
-            if( *pPCMdata1 < 0 )
-            {
-                temp = -( *pPCMdata1)
-                    * 2; // bring to same Amplitude level as it was original
-
-                if( temp > 32767 )
-                {
-                    *pPCMdata1 = -32766; // less then max allowed value
-                }
-                else
-                {
-                    *pPCMdata1 = (M4OSA_Int16)(-temp);
-               }
-        }
-        else
-        {
-            temp = ( *pPCMdata1)
-                * 2; // bring to same Amplitude level as it was original
-
-            if( temp > 32768 )
-            {
-                *pPCMdata1 = 32767; // less than max allowed value
-            }
-            else
-            {
-                *pPCMdata1 = (M4OSA_Int16)temp;
-            }
-        }
-
-            pPCMdata2++;
-            pPCMdata1++;
-        }
-    }
-    else
-    {
-        while( uiPCMsize-- > 0 )
-       {
-        /* mix the two samples */
-            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fOrigFactor * pC->fPTVolLevel
-               + *pPCMdata2 * pC->fAddedFactor * pC->fBTVolLevel );
-
-            pPCMdata1++;
-            pPCMdata2++;
-        }
-    }
-
-    /* Update pC->pSsrcBufferOut buffer */
-
-    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
-    {
-        tempPosBuffer = pC->pSsrcBufferOut
-            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-        memmove((void *)pC->pSsrcBufferOut, (void *)tempPosBuffer,
-            pC->pPosInSsrcBufferOut - tempPosBuffer);
-        pC->pPosInSsrcBufferOut -=
-            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-    }
-    else if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
-        == M4VIDEOEDITING_kMP3 )
-    {
-        tempPosBuffer = pC->pSsrcBufferIn
-            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-        memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
-            pC->pPosInSsrcBufferIn - tempPosBuffer);
-        pC->pPosInSsrcBufferIn -=
-            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-    }
-
-    /* [Mono] or [Stereo interleaved] : all is in one buffer */
-    pEncInBuffer.pTableBuffer[0] =
-        pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
-    pEncInBuffer.pTableBufferSize[0] =
-        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-    pEncInBuffer.pTableBufferSize[1] = 0;
-
-    /* Time in ms from data size, because it is PCM16 samples */
-    frameTimeDelta =
-        pEncInBuffer.pTableBufferSize[0] / sizeof(short) / pC->ewc.uiNbChannels;
-
-    /**
-    * Prepare output buffer */
-    pEncOutBuffer.pTableBuffer[0] =
-        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-    pEncOutBuffer.pTableBufferSize[0] = 0;
-
-    M4OSA_TRACE2_0("K **** blend AUs");
-
-    /**
-    * Encode the PCM audio */
-    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(pC->ewc.pAudioEncCtxt,
-        &pEncInBuffer, &pEncOutBuffer);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingDoMixing(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Set AU cts and size */
-    pC->ewc.WriterAudioAU.size =
-        pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
-    pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
-
-    /**
-    * Write the AU */
-    M4OSA_TRACE2_2("L ---- write : cts  = %ld [ 0x%x ]",
-        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-        pC->ewc.WriterAudioAU.size);
-
-    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingDoMixing: pWriterDataFcts->pProcessAU returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Increment the audio CTS for the next step */
-    pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingDoMixing(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR  M4VSS3GPP_intAudioMixingTransition(M4VSS3GPP_InternalAudioMixingContext *pC)
- * @brief    Decode/encode a few AU backward to initiate the encoder for later Mix segment.
- * @note
- * @param    pC    (IN) VSS audio mixing internal context
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-
-    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
-    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
-    M4OSA_Time
-        frameTimeDelta = 0; /**< Duration of the encoded (then written) data */
-
-    M4OSA_Int32 iTargetCts, iCurrentCts;
-
-    /**
-    * 'BZZZ' bug fix:
-    * add a silence frame */
-    err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingTransition():\
-            M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
-            err);
-        return err;
-    }
-
-    iCurrentCts = (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-    /* Do not do pre-encode step if there is no mixing (remove, 100 %, or not editable) */
-    if( M4OSA_FALSE == pC->bAudioMixingIsNeeded )
-    {
-        /**
-        * Advance in the original audio stream to reach the current time
-        * (We don't want iAudioCTS to be modified by the jump function,
-        * so we have to use a local variable). */
-        err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iCurrentCts);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingTransition:\
-             M4VSS3GPP_intClipJumpAudioAt() returns 0x%x!", err);
-            return err;
-        }
-    }
-    else
-    {
-        /**< don't try to pre-decode if clip is at its beginning... */
-        if( iCurrentCts > 0 )
-        {
-            /**
-            * Get the output AU to write into */
-            err = pC->ShellAPI.pWriterDataFcts->pStartAU(
-                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                &pC->ewc.WriterAudioAU);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingTransition:\
-                    pWriterDataFcts->pStartAU(audio) returns 0x%x!",
-                    err);
-                return err;
-            }
-
-            /**
-            * Jump a few AUs backward */
-            iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
-                * pC->ewc.iSilenceFrameDuration;
-
-            if( iTargetCts < 0 )
-            {
-                iTargetCts = 0; /**< Sanity check */
-            }
-
-            err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iTargetCts);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
-                    M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
-                    err);
-                return err;
-            }
-
-            /**
-            * Decode/encode up to the wanted position */
-            while( pC->pInputClipCtxt->iAudioFrameCts < iCurrentCts )
-            {
-                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
-
-                M4OSA_TRACE2_3("M .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                    pC->pInputClipCtxt->iAudioFrameCts
-                    / pC->pInputClipCtxt->scale_audio,
-                    pC->pInputClipCtxt->iAoffset
-                    / pC->pInputClipCtxt->scale_audio,
-                    pC->pInputClipCtxt->uiAudioFrameSize);
-
-                if( M4OSA_ERR_IS_ERROR(err) )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
-                        M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(
-                    pC->pInputClipCtxt);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
-                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /* [Mono] or [Stereo interleaved] : all is in one buffer */
-                pEncInBuffer.pTableBuffer[0] =
-                    pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
-                pEncInBuffer.pTableBufferSize[0] =
-                    pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
-                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-                pEncInBuffer.pTableBufferSize[1] = 0;
-
-                /* Time in ms from data size, because it is PCM16 samples */
-                frameTimeDelta =
-                    pEncInBuffer.pTableBufferSize[0] / sizeof(short)
-                    / pC->ewc.uiNbChannels;
-
-                /**
-                * Prepare output buffer */
-                pEncOutBuffer.pTableBuffer[0] =
-                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-                pEncOutBuffer.pTableBufferSize[0] = 0;
-
-                M4OSA_TRACE2_0("N **** pre-encode");
-
-                /**
-                * Encode the PCM audio */
-                err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
-                    pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingTransition():\
-                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                        err);
-                    return err;
-                }
-            }
-
-            /**
-            * Set AU cts and size */
-            pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
-                0]; /**< Get the size of encoded data */
-                pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
-
-                /**
-                * Write the AU */
-                M4OSA_TRACE2_2("O ---- write : cts  = %ld [ 0x%x ]",
-                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-                    pC->ewc.WriterAudioAU.size);
-
-                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
-                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                    &pC->ewc.WriterAudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingTransition:\
-                        pWriterDataFcts->pProcessAU returns 0x%x!",    err);
-                    return err;
-                }
-
-                /**
-                * Increment the audio CTS for the next step */
-                pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder()
- * @brief    Creates the video encoder
- * @note
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err;
-    M4ENCODER_AdvancedParams EncParams;
-
-    /**
-    * Simulate a writer interface with our specific function */
-    pC->ewc.OurWriterDataInterface.pProcessAU =
-        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
-                                but it follow the writer interface */
-    pC->ewc.OurWriterDataInterface.pStartAU =
-        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
-                              but it follow the writer interface */
-    pC->ewc.OurWriterDataInterface.pWriterContext =
-        (M4WRITER_Context)
-        pC; /**< We give the internal context as writer context */
-
-    /**
-    * Get the encoder interface, if not already done */
-    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
-    {
-        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
-            pC->ewc.VideoStreamType);
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingCreateVideoEncoder: setCurrentEncoder returns 0x%x",
-            err);
-        M4ERR_CHECK_RETURN(err);
-    }
-
-    /**
-    * Set encoder shell parameters according to VSS settings */
-
-    /* Common parameters */
-    EncParams.InputFormat = M4ENCODER_kIYUV420;
-    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
-    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
-    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
-    EncParams.videoProfile = pC->ewc.outputVideoProfile;
-    EncParams.videoLevel = pC->ewc.outputVideoLevel;
-
-    /* No strict regulation in video editor */
-    /* Because of the effects and transitions we should allow more flexibility */
-    /* Also it prevents to drop important frames
-      (with a bad result on sheduling and block effetcs) */
-    EncParams.bInternalRegulation = M4OSA_FALSE;
-    EncParams.FrameRate = M4ENCODER_kVARIABLE_FPS;
-
-    /**
-    * Other encoder settings (defaults) */
-    EncParams.uiHorizontalSearchRange = 0;     /* use default */
-    EncParams.uiVerticalSearchRange = 0;       /* use default */
-    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
-    EncParams.uiIVopPeriod = 0;                /* use default */
-    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
-    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
-    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
-    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
-
-    switch( pC->ewc.VideoStreamType )
-    {
-        case M4SYS_kH263:
-
-            EncParams.Format = M4ENCODER_kH263;
-
-            EncParams.uiStartingQuantizerValue = 10;
-            EncParams.uiRateFactor = 1; /* default */
-
-            EncParams.bErrorResilience = M4OSA_FALSE;
-            EncParams.bDataPartitioning = M4OSA_FALSE;
-            break;
-
-        case M4SYS_kMPEG_4:
-
-            EncParams.Format = M4ENCODER_kMPEG4;
-
-            EncParams.uiStartingQuantizerValue = 8;
-            EncParams.uiRateFactor = 1;
-
-            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
-            {
-                EncParams.bErrorResilience = M4OSA_FALSE;
-                EncParams.bDataPartitioning = M4OSA_FALSE;
-            }
-            else
-            {
-                EncParams.bErrorResilience = M4OSA_TRUE;
-                EncParams.bDataPartitioning = M4OSA_TRUE;
-            }
-            break;
-
-        case M4SYS_kH264:
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: M4SYS_H264");
-
-            EncParams.Format = M4ENCODER_kH264;
-
-            EncParams.uiStartingQuantizerValue = 10;
-            EncParams.uiRateFactor = 1; /* default */
-
-            EncParams.bErrorResilience = M4OSA_FALSE;
-            EncParams.bDataPartitioning = M4OSA_FALSE;
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: Unknown videoStreamType 0x%x",
-                pC->ewc.VideoStreamType);
-            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
-    }
-
-    EncParams.Bitrate =
-        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
-
-    M4OSA_TRACE1_0(
-        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctInit");
-    /**
-    * Init the video encoder (advanced settings version of the encoder Open function) */
-    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
-        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
-        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
-        pC->ShellAPI.pCurrentVideoEncoderUserData);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
-            pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
-            err);
-        return err;
-    }
-
-    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
-    M4OSA_TRACE1_0(
-        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctOpen");
-    M4OSA_TRACE1_2("vss: audio mix encoder open profile :%d, level %d",
-        EncParams.videoProfile, EncParams.videoLevel);
-    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
-        &pC->ewc.WriterVideoAU, &EncParams);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
-            pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
-            err);
-        return err;
-    }
-
-    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
-    M4OSA_TRACE1_0(
-        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctStart");
-
-    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
-    {
-        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
-            pC->ewc.pEncContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
-                pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder()
- * @brief    Destroy the video encoder
- * @note
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
-    M4VSS3GPP_InternalAudioMixingContext *pC )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    if( M4OSA_NULL != pC->ewc.pEncContext )
-    {
-        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
-        {
-            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
-            {
-                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
-                    pC->ewc.pEncContext);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
-                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
-                        err);
-                }
-            }
-
-            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
-        }
-
-        /* Has the encoder actually been opened? Don't close it if that's not the case. */
-        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
-        {
-            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
-                pC->ewc.pEncContext);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
-                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
-                    err);
-            }
-
-            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
-        }
-
-        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
-            pC->ewc.pEncContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
-                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
-                err);
-            /**< We do not return the error here because we still have stuff to free */
-        }
-
-        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
-        /**
-        * Reset variable */
-        pC->ewc.pEncContext = M4OSA_NULL;
-    }
-
-    M4OSA_TRACE3_1(
-        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder: returning 0x%x", err);
-    return err;
-}
-
-M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
-                                         M4OSA_Int32 storeCount, M4OSA_Int32 thresholdValue )
-{
-    M4OSA_Bool result = 0;
-    int i;
-    int finalValue = 0;
-
-    for ( i = 0; i < storeCount; i++ )
-        finalValue += averageValue[i];
-
-    finalValue = finalValue / storeCount;
-
-
-    if( finalValue > thresholdValue )
-        result = M4OSA_TRUE;
-    else
-        result = M4OSA_FALSE;
-
-    return result;
-}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Clip.c b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
deleted file mode 100755
index 40612f3..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
+++ /dev/null
@@ -1,2112 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_Clip.c
- * @brief    Implementation of functions related to input clip management.
- * @note    All functions in this file are static, i.e. non public
- ******************************************************************************
- */
-
-/****************/
-/*** Includes ***/
-/****************/
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- *    Our headers */
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-#include "M4VSS3GPP_InternalTypes.h"
-#include "M4VSS3GPP_InternalFunctions.h"
-#include "M4VSS3GPP_InternalConfig.h"
-
-/**
- *    OSAL headers */
-#include "M4OSA_Memory.h" /* OSAL memory management */
-#include "M4OSA_Debug.h"  /* OSAL debug management */
-
-
-/**
- * Common headers (for aac) */
-#include "M4_Common.h"
-
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-#include "M4VD_EXTERNAL_Interface.h"
-
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-/* Osal header fileno */
-#include "M4OSA_CharStar.h"
-
-/**
- ******************************************************************************
- * define    Static function prototypes
- ******************************************************************************
- */
-
-static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
-    M4VSS3GPP_ClipContext *pClipCtxt );
-
-static M4OSA_ERR M4VSS3GPP_intCheckAndGetCodecAacProperties(
-        M4VSS3GPP_ClipContext *pClipCtxt);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipOpen()
- * @brief    Open a clip. Creates a clip context.
- * @note
- * @param   hClipCtxt            (OUT) Return the internal clip context
- * @param   pClipSettings        (IN) Edit settings of this clip. The module will keep a
- *                               reference to this pointer
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @param    bSkipAudioTrack        (IN) If true, do not open the audio
- * @param    bFastOpenMode        (IN) If true, use the fast mode of the 3gpp reader
- *                             (only the first AU is read)
- * @return    M4NO_ERROR:                No error
- * @return    M4ERR_ALLOC:            There is no more available memory
- ******************************************************************************
- */
-
-M4OSA_ERR M4VSS3GPP_intClipInit( M4VSS3GPP_ClipContext ** hClipCtxt,
-                                M4OSA_FileReadPointer *pFileReadPtrFct )
-{
-    M4VSS3GPP_ClipContext *pClipCtxt;
-    M4OSA_ERR err;
-
-    M4OSA_DEBUG_IF2((M4OSA_NULL == hClipCtxt), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipInit: hClipCtxt is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipInit: pFileReadPtrFct is M4OSA_NULL");
-
-    /**
-    * Allocate the clip context */
-    *hClipCtxt =
-        (M4VSS3GPP_ClipContext *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipContext),
-        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_ClipContext");
-
-    if( M4OSA_NULL == *hClipCtxt )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intClipInit(): unable to allocate M4VSS3GPP_ClipContext,\
-            returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-    M4OSA_TRACE3_1("M4VSS3GPP_intClipInit(): clipCtxt=0x%x", *hClipCtxt);
-
-
-    /**
-    * Use this shortcut to simplify the code */
-    pClipCtxt = *hClipCtxt;
-
-    /* Inialization of context Variables */
-    memset((void *)pClipCtxt, 0,sizeof(M4VSS3GPP_ClipContext));
-
-    pClipCtxt->pSettings = M4OSA_NULL;
-
-    /**
-    * Init the clip context */
-    pClipCtxt->iVoffset = 0;
-    pClipCtxt->iAoffset = 0;
-    pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_READ;
-    pClipCtxt->Astatus = M4VSS3GPP_kClipStatus_READ;
-
-    pClipCtxt->pReaderContext = M4OSA_NULL;
-    pClipCtxt->pVideoStream = M4OSA_NULL;
-    pClipCtxt->pAudioStream = M4OSA_NULL;
-    pClipCtxt->VideoAU.m_dataAddress = M4OSA_NULL;
-    pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
-
-    pClipCtxt->pViDecCtxt = M4OSA_NULL;
-    pClipCtxt->iVideoDecCts = 0;
-    pClipCtxt->iVideoRenderCts = 0;
-    pClipCtxt->lastDecodedPlane = M4OSA_NULL;
-    pClipCtxt->iActualVideoBeginCut = 0;
-    pClipCtxt->iActualAudioBeginCut = 0;
-    pClipCtxt->bVideoAuAvailable = M4OSA_FALSE;
-    pClipCtxt->bFirstAuWritten = M4OSA_FALSE;
-
-    pClipCtxt->bMpeg4GovState = M4OSA_FALSE;
-
-    pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
-    pClipCtxt->pAudioFramePtr = M4OSA_NULL;
-    pClipCtxt->iAudioFrameCts = 0;
-    pClipCtxt->pAudioDecCtxt = 0;
-    pClipCtxt->AudioDecBufferOut.m_bufferSize = 0;
-    pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
-
-    pClipCtxt->pFileReadPtrFct = pFileReadPtrFct;
-    pClipCtxt->pPlaneYuv   = M4OSA_NULL;
-    pClipCtxt->pPlaneYuvWithEffect = M4OSA_NULL;
-    pClipCtxt->m_pPreResizeFrame = M4OSA_NULL;
-    pClipCtxt->bGetYuvDataFromDecoder = M4OSA_TRUE;
-
-    /*
-    * Reset pointers for media and codecs interfaces */
-    err = M4VSS3GPP_clearInterfaceTables(&pClipCtxt->ShellAPI);
-    M4ERR_CHECK_RETURN(err);
-
-    /*
-    *  Call the media and codecs subscription module */
-    err = M4VSS3GPP_subscribeMediaAndCodec(&pClipCtxt->ShellAPI);
-    M4ERR_CHECK_RETURN(err);
-
-    return M4NO_ERROR;
-}
-
-// This method maps the frequency value to a string.
-static const char* freqToString(int freq) {
-    switch (freq) {
-    case 8000:
-        return "_8000";
-    case 11025:
-        return "_11025";
-    case 12000:
-        return "_12000";
-    case 16000:
-        return "_16000";
-    case 22050:
-        return "_22050";
-    case 24000:
-        return "_24000";
-    case 32000:
-        return "_32000";
-    case 44100:
-        return "_44100";
-    case 48000:
-        return "_48000";
-    default:
-        M4OSA_TRACE1_1("Unsupported sampling rate: %d Hz", freq);
-        return NULL;
-    }
-}
-
-// This method maps the number of channel value to
-// a string that will be part of a file name extension
-static const char* channelToStringAndFileExt(int channels) {
-    switch (channels) {
-    case 1:
-        return "_1.pcm";
-    case 2:
-        return "_2.pcm";
-    default:
-        M4OSA_TRACE1_1("Unsupported %d channels", channels);
-        return NULL;
-    }
-}
-
-/* Note: if the clip is opened in fast mode, it can only be used for analysis and nothing else. */
-M4OSA_ERR M4VSS3GPP_intClipOpen( M4VSS3GPP_ClipContext *pClipCtxt,
-                                M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Bool bSkipAudioTrack,
-                                M4OSA_Bool bFastOpenMode, M4OSA_Bool bAvoidOpeningVideoDec )
-{
-    M4OSA_ERR err;
-    M4READER_MediaFamily mediaFamily;
-    M4_StreamHandler *pStreamHandler;
-    M4_StreamHandler  dummyStreamHandler;
-    M4OSA_Int32 iDuration;
-    M4OSA_Void *decoderUserData;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-    M4DECODER_MPEG4_DecoderConfigInfo dummy;
-    M4DECODER_VideoSize videoSizeFromDSI;
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-    M4DECODER_OutputFilter FilterOption;
-
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipOpen: pClipCtxt is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipOpen: pClipSettings is M4OSA_NULL");
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_intClipOpen: called with pClipCtxt: 0x%x, bAvoidOpeningVideoDec=0x%x",
-        pClipCtxt, bAvoidOpeningVideoDec);
-    /**
-    * Keep a pointer to the clip settings. Remember that we don't possess it! */
-    pClipCtxt->pSettings = pClipSettings;
-    if(M4VIDEOEDITING_kFileType_ARGB8888 == pClipCtxt->pSettings->FileType) {
-        M4OSA_TRACE3_0("M4VSS3GPP_intClipOpen: Image stream; set current vid dec");
-        err = M4VSS3GPP_setCurrentVideoDecoder(
-                  &pClipCtxt->ShellAPI, M4DA_StreamTypeVideoARGB8888);
-        M4ERR_CHECK_RETURN(err);
-
-        decoderUserData = M4OSA_NULL;
-
-        err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctCreate(
-                  &pClipCtxt->pViDecCtxt,
-                  &dummyStreamHandler,
-                  pClipCtxt->ShellAPI.m_pReader,
-                  pClipCtxt->ShellAPI.m_pReaderDataIt,
-                  &pClipCtxt->VideoAU,
-                  decoderUserData);
-
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: \
-                m_pVideoDecoder->m_pFctCreate returns 0x%x", err);
-            return err;
-        }
-        M4OSA_TRACE3_1("M4VSS3GPP_intClipOpen: \
-            Vid dec started; pViDecCtxt=0x%x", pClipCtxt->pViDecCtxt);
-
-        return M4NO_ERROR;
-    }
-
-    /**
-    * Get the correct reader interface */
-    err = M4VSS3GPP_setCurrentReader(&pClipCtxt->ShellAPI,
-        pClipCtxt->pSettings->FileType);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Init the 3GPP or MP3 reader */
-    err =
-        pClipCtxt->ShellAPI.m_pReader->m_pFctCreate(&pClipCtxt->pReaderContext);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctCreate returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Link the reader interface to the reader context (used by the decoder to know the reader) */
-    pClipCtxt->ShellAPI.m_pReaderDataIt->m_readerContext =
-        pClipCtxt->pReaderContext;
-
-    /**
-    * Set the OSAL read function set */
-    err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
-        pClipCtxt->pReaderContext,
-        M4READER_kOptionID_SetOsaFileReaderFctsPtr,
-        (M4OSA_DataOption)(pClipCtxt->pFileReadPtrFct));
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Set the fast open mode if asked (3GPP only) */
-    if( M4VIDEOEDITING_kFileType_3GPP == pClipCtxt->pSettings->FileType )
-    {
-        if( M4OSA_TRUE == bFastOpenMode )
-        {
-            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
-                pClipCtxt->pReaderContext,
-                M4READER_3GP_kOptionID_FastOpenMode, M4OSA_NULL);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intClipOpen():\
-                    m_pReader->m_pFctSetOption(FastOpenMode) returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-
-        /**
-        * Set the skip audio option if asked */
-        if( M4OSA_TRUE == bSkipAudioTrack )
-        {
-            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
-                pClipCtxt->pReaderContext,
-                M4READER_3GP_kOptionID_VideoOnly, M4OSA_NULL);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption(VideoOnly) returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-    }
-    if (pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM) {
-        // Compose the temp filename with sample rate and channel information.
-        const char* freqStr = freqToString(
-                    pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency);
-
-        if (freqStr == NULL) {
-            return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
-        }
-
-        const char* chanStr = channelToStringAndFileExt(
-                    pClipCtxt->pSettings->ClipProperties.uiNbChannels);
-
-        if (chanStr == NULL) {
-                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
-        }
-
-        // Allocate one byte more to hold the null terminator
-        M4OSA_UInt32 length =
-            strlen(pClipSettings->pFile) + strlen(freqStr) + strlen(chanStr) + 1;
-
-        char* pTempFile = (char *) malloc(length);
-        if (pTempFile == NULL) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen(): malloc %d bytes fail",length);
-            return M4ERR_ALLOC;
-        }
-        memset(pTempFile, 0, length);
-        memcpy(pTempFile, pClipSettings->pFile, strlen(pClipSettings->pFile));
-        strncat(pTempFile, freqStr, strlen(freqStr));
-        strncat(pTempFile, chanStr, strlen(chanStr));
-
-        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext, pTempFile);
-        if (pTempFile != NULL) {
-            free(pTempFile);
-            pTempFile = NULL;
-        }
-        if ( M4NO_ERROR != err ) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen(): open pcm file returns error : 0x%x", err);
-            return err;
-        }
-    }
-    else
-    {
-    /**
-        * Open the 3GPP/MP3 clip file */
-        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext,
-             pClipSettings->pFile);
-    }
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_UInt32 uiDummy, uiCoreId;
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctOpen returns 0x%x", err);
-
-        /**
-        * If the error is from the core reader, we change it to a public VSS3GPP error */
-        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
-
-        if( M4MP4_READER == uiCoreId )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intClipOpen(): returning M4VSS3GPP_ERR_INVALID_3GPP_FILE");
-            return M4VSS3GPP_ERR_INVALID_3GPP_FILE;
-        }
-        return err;
-    }
-
-    /**
-    * Get the audio and video streams */
-    while( err == M4NO_ERROR )
-    {
-        err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetNextStream(
-            pClipCtxt->pReaderContext, &mediaFamily, &pStreamHandler);
-
-        /*in case we found a BIFS stream or something else...*/
-        if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
-            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
-        {
-            err = M4NO_ERROR;
-            continue;
-        }
-
-        if( M4NO_ERROR == err ) /**< One stream found */
-        {
-            /**
-            * Found a video stream */
-            if( ( mediaFamily == M4READER_kMediaFamilyVideo)
-                && (M4OSA_NULL == pClipCtxt->pVideoStream) )
-            {
-                if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeVideoMpeg4
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeVideoMpeg4Avc
-                    == pStreamHandler->m_streamType) )
-                {
-                    M4OSA_TRACE3_1(
-                        "M4VSS3GPP_intClipOpen():\
-                        Found a H263 or MPEG-4 or H264 video stream in input 3gpp clip; %d",
-                        pStreamHandler->m_streamType);
-
-                    /**
-                    * Keep pointer to the video stream */
-                    pClipCtxt->pVideoStream =
-                        (M4_VideoStreamHandler *)pStreamHandler;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-
-                    /**
-                    * Reset the stream reader */
-                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
-                        pClipCtxt->pReaderContext,
-                        (M4_StreamHandler *)pClipCtxt->pVideoStream);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(video) returns 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Initializes an access Unit */
-                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
-                        pClipCtxt->pReaderContext,
-                        (M4_StreamHandler *)pClipCtxt->pVideoStream,
-                        &pClipCtxt->VideoAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intClipOpen():\
-                            m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-                else /**< Not H263 or MPEG-4 (H264, etc.) */
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS_editClipOpen():\
-                        Found an unsupported video stream (0x%x) in input 3gpp clip",
-                        pStreamHandler->m_streamType);
-
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-            }
-            /**
-            * Found an audio stream */
-            else if( ( mediaFamily == M4READER_kMediaFamilyAudio)
-                && (M4OSA_NULL == pClipCtxt->pAudioStream) )
-            {
-                if( ( M4DA_StreamTypeAudioAmrNarrowBand
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioMp3
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioEvrc
-                    == pStreamHandler->m_streamType)
-                    || (M4DA_StreamTypeAudioPcm
-                    == pStreamHandler->m_streamType) )
-                {
-                    M4OSA_TRACE3_1(
-                        "M4VSS3GPP_intClipOpen(): \
-                        Found an AMR-NB or AAC or MP3 audio stream in input clip; %d",
-                        pStreamHandler->m_streamType);
-
-                    /**
-                    * Keep pointer to the audio stream */
-                    pClipCtxt->pAudioStream =
-                        (M4_AudioStreamHandler *)pStreamHandler;
-                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
-
-                    /**
-                    * Reset the stream reader */
-                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
-                        pClipCtxt->pReaderContext,
-                        (M4_StreamHandler *)pClipCtxt->pAudioStream);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(audio) returns 0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Initializes an access Unit */
-                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
-                        pClipCtxt->pReaderContext,
-                        (M4_StreamHandler *)pClipCtxt->pAudioStream,
-                        &pClipCtxt->AudioAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intClipOpen():\
-                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-                else /**< Not AMR-NB or AAC (AMR-WB...) */
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intClipOpen():\
-                        Found an unsupported audio stream (0x%x) in input 3gpp/mp3 clip",
-                        pStreamHandler->m_streamType);
-
-                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
-                }
-            }
-        }
-        else if( M4OSA_ERR_IS_ERROR(err) )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctGetNextStream() returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Init Video decoder */
-    if( M4OSA_NULL != pClipCtxt->pVideoStream )
-    {
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-  /* If external decoders are possible, it's best to avoid opening the decoder if the clip is only
-  going to be used for analysis, as we're not going to use it for the analysis in the case of a
-  possible external decoder anyway, and either there could be no decoder at this point or the HW
-  decoder could be present, which we want to avoid opening for that. See comments in
-  intBuildAnalysis for more details. */
-
-  /* CHANGEME Temporarily only do this for MPEG4, since for now only MPEG4 external decoders are
-  supported, and the following wouldn't work for H263 so a release where external decoders are
-  possible, but not used, wouldn't work with H263 stuff. */
-
-        if( bAvoidOpeningVideoDec && M4DA_StreamTypeVideoMpeg4
-            == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
-        {
-            /* Oops! The mere act of opening the decoder also results in the image size being
-            filled in the video stream! Compensate for this by using ParseVideoDSI to fill
-            this info. */
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intClipOpen: Mpeg4 stream; vid dec not started");
-            err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
-                m_basicProperties.m_pDecoderSpecificInfo,
-                pClipCtxt->pVideoStream->
-                m_basicProperties.m_decoderSpecificInfoSize,
-                &dummy, &videoSizeFromDSI);
-
-            pClipCtxt->pVideoStream->m_videoWidth = videoSizeFromDSI.m_uiWidth;
-            pClipCtxt->pVideoStream->m_videoHeight =
-                videoSizeFromDSI.m_uiHeight;
-        }
-        else
-        {
-
-#endif
-
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intClipOpen: Mp4/H263/H264 stream; set current vid dec");
-            err = M4VSS3GPP_setCurrentVideoDecoder(&pClipCtxt->ShellAPI,
-                pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
-            M4ERR_CHECK_RETURN(err);
-
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-            decoderUserData =
-                pClipCtxt->ShellAPI.m_pCurrentVideoDecoderUserData;
-
-#else
-
-            decoderUserData = M4OSA_NULL;
-
-#endif
-
-            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctCreate(
-                &pClipCtxt->pViDecCtxt,
-                &pClipCtxt->pVideoStream->m_basicProperties,
-                pClipCtxt->ShellAPI.m_pReader,
-                pClipCtxt->ShellAPI.m_pReaderDataIt,
-                &pClipCtxt->VideoAU, decoderUserData);
-
-            if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
-                || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
-            {
-                /**
-                * Our decoder is not compatible with H263 profile other than 0.
-                * So it returns this internal error code.
-                * We translate it to our own error code */
-                return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctCreate returns 0x%x",
-                    err);
-                return err;
-            }
-            M4OSA_TRACE3_1(
-                "M4VSS3GPP_intClipOpen: Vid dec started; pViDecCtxt=0x%x",
-                pClipCtxt->pViDecCtxt);
-
-            if( M4DA_StreamTypeVideoMpeg4Avc
-                == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
-            {
-                FilterOption.m_pFilterFunction =
-                    (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
-                FilterOption.m_pFilterUserData = M4OSA_NULL;
-                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                    pClipCtxt->pViDecCtxt, M4DECODER_kOptionID_OutputFilter,
-                    (M4OSA_DataOption) &FilterOption);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption returns 0x%x",
-                        err);
-                    return err;
-                }
-                else
-                {
-                    M4OSA_TRACE3_0(
-                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption\
-                        M4DECODER_kOptionID_OutputFilter OK");
-                }
-            }
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-        }
-
-#endif
-
-    }
-
-    /**
-    * Init Audio decoder */
-    if( M4OSA_NULL != pClipCtxt->pAudioStream )
-    {
-        err = M4VSS3GPP_intClipPrepareAudioDecoder(pClipCtxt);
-        M4ERR_CHECK_RETURN(err);
-        M4OSA_TRACE3_1("M4VSS3GPP_intClipOpen: Audio dec started; context=0x%x",
-            pClipCtxt->pAudioDecCtxt);
-    }
-    else
-    {
-        pClipCtxt->AudioAU.m_streamID = 0;
-        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
-        pClipCtxt->AudioAU.m_size = 0;
-        pClipCtxt->AudioAU.m_CTS = 0;
-        pClipCtxt->AudioAU.m_DTS = 0;
-        pClipCtxt->AudioAU.m_attribute = 0;
-        pClipCtxt->AudioAU.m_maxsize = 0;
-        pClipCtxt->AudioAU.m_structSize = sizeof(pClipCtxt->AudioAU);
-    }
-
-    /**
-    * Get the duration of the longest stream */
-    if( M4OSA_TRUE == pClipCtxt->pSettings->ClipProperties.bAnalysed )
-    {
-        /* If already calculated set it to previous value */
-        /* Because fast open and full open can return a different value,
-           it can mismatch user settings */
-        /* Video track is more important than audio track (if video track is shorter than
-           audio track, it can led to cut larger than expected) */
-        iDuration = pClipCtxt->pSettings->ClipProperties.uiClipVideoDuration;
-
-        if( iDuration == 0 )
-        {
-            iDuration = pClipCtxt->pSettings->ClipProperties.uiClipDuration;
-        }
-    }
-    else
-    {
-        /* Else compute it from streams */
-        iDuration = 0;
-
-        if( M4OSA_NULL != pClipCtxt->pVideoStream )
-        {
-            iDuration = (M4OSA_Int32)(
-                pClipCtxt->pVideoStream->m_basicProperties.m_duration);
-        }
-
-        if( ( M4OSA_NULL != pClipCtxt->pAudioStream) && ((M4OSA_Int32)(
-            pClipCtxt->pAudioStream->m_basicProperties.m_duration)
-            > iDuration) && iDuration == 0 )
-        {
-            iDuration = (M4OSA_Int32)(
-                pClipCtxt->pAudioStream->m_basicProperties.m_duration);
-        }
-    }
-
-    /**
-    * If end time is not used, we set it to the video track duration */
-    if( 0 == pClipCtxt->pSettings->uiEndCutTime )
-    {
-        pClipCtxt->pSettings->uiEndCutTime = (M4OSA_UInt32)iDuration;
-    }
-
-    pClipCtxt->iEndTime = (M4OSA_Int32)pClipCtxt->pSettings->uiEndCutTime;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intClipOpen(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
- * @brief    Delete the audio track. Clip will be like if it had no audio track
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- ******************************************************************************
- */
-M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack( M4VSS3GPP_ClipContext *pClipCtxt )
-{
-    /**
-    * But we don't have to free the audio stream. It will be freed by the reader when closing it*/
-    pClipCtxt->pAudioStream = M4OSA_NULL;
-
-    /**
-    * We will return a constant silence AMR AU.
-    * We set it here once, instead of at each read audio step. */
-    pClipCtxt->pAudioFramePtr = (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
-    pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
-
-    /**
-    * Free the decoded audio buffer (it needs to be re-allocated to store silence
-      frame eventually)*/
-    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
-    {
-        free(pClipCtxt->AudioDecBufferOut.m_dataAddress);
-        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
-    }
-
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCurrentTime()
- * @brief    Jump to the previous RAP and decode up to the current video time
- * @param   pClipCtxt    (IN) Internal clip context
- * @param   iCts        (IN) Target CTS
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts( M4VSS3GPP_ClipContext *pClipCtxt,
-                                              M4OSA_Int32 iCts )
-{
-    M4OSA_Int32 iRapCts, iClipCts;
-    M4_MediaTime dDecodeTime;
-    M4OSA_Bool bClipJump = M4OSA_FALSE;
-    M4OSA_ERR err;
-
-    /**
-    * Compute the time in the clip base */
-    iClipCts = iCts - pClipCtxt->iVoffset;
-
-    /**
-    * If we were reading the clip, we must jump to the previous RAP
-    * to decode from that point. */
-    if( M4VSS3GPP_kClipStatus_READ == pClipCtxt->Vstatus )
-    {
-        /**
-        * The decoder must be told to jump */
-        bClipJump = M4OSA_TRUE;
-        pClipCtxt->iVideoDecCts = iClipCts;
-
-        /**
-        * Remember the clip reading state */
-        pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE_UP_TO;
-    }
-
-    /**
-    * If we are in decodeUpTo() process, check if we need to do
-    one more step or if decoding is finished */
-    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pClipCtxt->Vstatus )
-    {
-        /* Do a step of 500 ms decoding */
-        pClipCtxt->iVideoDecCts += 500;
-
-        if( pClipCtxt->iVideoDecCts > iClipCts )
-        {
-            /* Target time reached, we switch back to DECODE mode */
-            pClipCtxt->iVideoDecCts = iClipCts;
-            pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE;
-        }
-
-        M4OSA_TRACE2_1("c ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
-    }
-    else
-    {
-        /* Just decode at current clip cts */
-        pClipCtxt->iVideoDecCts = iClipCts;
-
-        M4OSA_TRACE2_1("d ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
-    }
-
-    /**
-    * Decode up to the target */
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_intClipDecodeVideoUpToCts: Decoding upTo CTS %.3f, pClipCtxt=0x%x",
-        dDecodeTime, pClipCtxt);
-
-    dDecodeTime = (M4OSA_Double)pClipCtxt->iVideoDecCts;
-    pClipCtxt->isRenderDup = M4OSA_FALSE;
-    err =
-        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDecode(pClipCtxt->pViDecCtxt,
-        &dDecodeTime, bClipJump, 0);
-
-    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
-        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctDecode returns 0x%x!",
-            err);
-        return err;
-    }
-
-    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
-    {
-        pClipCtxt->isRenderDup = M4OSA_TRUE;
-    }
-
-    /**
-    * Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeVideoUpToCts: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
- * @brief    Read one AU frame in the clip
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(
-    M4VSS3GPP_ClipContext *pClipCtxt )
-{
-    M4OSA_ERR err;
-
-    /* ------------------------------ */
-    /* ---------- NO AUDIO ---------- */
-    /* ------------------------------ */
-
-    if( M4OSA_NULL == pClipCtxt->pAudioStream )
-    {
-        /* If there is no audio track, we return silence AUs */
-        pClipCtxt->pAudioFramePtr =
-            (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
-        pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
-        pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
-
-        M4OSA_TRACE2_0("b #### blank track");
-    }
-
-    /* ---------------------------------- */
-    /* ---------- AMR-NB, EVRC ---------- */
-    /* ---------------------------------- */
-
-    else if( ( M4VIDEOEDITING_kAMR_NB
-        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
-        || (M4VIDEOEDITING_kEVRC
-        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
-    {
-        if( M4OSA_FALSE == pClipCtxt->bAudioFrameAvailable )
-        {
-            /**
-            * No AU available, so we must must read one from the original track reader */
-            err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-                pClipCtxt->pReaderContext,
-                (M4_StreamHandler *)pClipCtxt->pAudioStream,
-                &pClipCtxt->AudioAU);
-
-            if( M4NO_ERROR == err )
-            {
-                /**
-                * Set the current AMR frame position at the beginning of the read AU */
-                pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
-
-                /**
-                * Set the AMR frame CTS */
-                pClipCtxt->iAudioFrameCts =
-                    (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS
-                    * pClipCtxt->scale_audio + 0.5);
-            }
-            else if( ( M4WAR_NO_MORE_AU == err) && (M4VIDEOEDITING_kAMR_NB
-                == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
-            {
-                /**
-                * If there is less audio than the stream duration indicated,
-                * we return silence at the end of the stream. */
-                pClipCtxt->pAudioFramePtr =
-                    (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
-                pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
-                pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
-
-                M4OSA_TRACE2_0("a #### silence AU");
-
-                /**
-                * Return with M4WAR_NO_MORE_AU */
-                M4OSA_TRACE3_0(
-                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: \
-                    returning M4WAR_NO_MORE_AU (silence)");
-                return M4WAR_NO_MORE_AU;
-            }
-            else /**< fatal error (or no silence in EVRC) */
-            {
-                M4OSA_TRACE3_1(
-                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: m_pFctGetNextAu() returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-        else /* bAudioFrameAvailable */
-        {
-            /**
-            * Go to the next AMR frame in the AU */
-            pClipCtxt->pAudioFramePtr += pClipCtxt->uiAudioFrameSize;
-
-            /**
-            * Increment CTS: one AMR frame is 20 ms long */
-            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
-        }
-
-        /**
-        * Get the size of the pointed AMR frame */
-        switch( pClipCtxt->pSettings->ClipProperties.AudioStreamType )
-        {
-            case M4VIDEOEDITING_kAMR_NB:
-                pClipCtxt->uiAudioFrameSize =
-                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_AMRNB(
-                    pClipCtxt->pAudioFramePtr);
-                break;
-
-            case M4VIDEOEDITING_kEVRC:
-                pClipCtxt->uiAudioFrameSize =
-                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_EVRC(
-                    pClipCtxt->pAudioFramePtr);
-                break;
-            default:
-                break;
-        }
-
-        if( 0 == pClipCtxt->uiAudioFrameSize )
-        {
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size == 0,\
-                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AMR_AU");
-            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
-        }
-        else if( pClipCtxt->uiAudioFrameSize > pClipCtxt->AudioAU.m_size )
-        {
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size greater than AU size!,\
-                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AMR_AU");
-            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
-        }
-
-        /**
-        * Check if the end of the current AU has been reached or not */
-        if( ( pClipCtxt->pAudioFramePtr + pClipCtxt->uiAudioFrameSize)
-            < (pClipCtxt->AudioAU.m_dataAddress + pClipCtxt->AudioAU.m_size) )
-        {
-            pClipCtxt->bAudioFrameAvailable = M4OSA_TRUE;
-        }
-        else
-        {
-            pClipCtxt->bAudioFrameAvailable =
-                M4OSA_FALSE; /**< will be used for next call */
-        }
-    }
-
-    /* ------------------------- */
-    /* ---------- AAC ---------- */
-    /* ------------------------- */
-
-    else if( ( M4VIDEOEDITING_kAAC
-        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
-        || (M4VIDEOEDITING_kAACplus
-        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
-        || (M4VIDEOEDITING_keAACplus
-        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
-    {
-        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-            pClipCtxt->pReaderContext,
-            (M4_StreamHandler *)pClipCtxt->pAudioStream,
-            &pClipCtxt->AudioAU);
-
-        if( M4NO_ERROR == err )
-        {
-            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
-            pClipCtxt->uiAudioFrameSize =
-                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
-            pClipCtxt->iAudioFrameCts =
-                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
-                + 0.5);
-
-            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
-            /* (cts is not an integer with frequency 24 kHz for example) */
-            pClipCtxt->iAudioFrameCts = ( ( pClipCtxt->iAudioFrameCts
-                + pClipCtxt->iSilenceFrameDuration / 2)
-                / pClipCtxt->iSilenceFrameDuration)
-                * pClipCtxt->iSilenceFrameDuration;
-        }
-        else if( M4WAR_NO_MORE_AU == err )
-        {
-            /**
-            * If there is less audio than the stream duration indicated,
-            * we return silence at the end of the stream. */
-            pClipCtxt->pAudioFramePtr =
-                (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
-            pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
-            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
-
-            M4OSA_TRACE2_0("a #### silence AU");
-
-            /**
-            * Return with M4WAR_NO_MORE_AU */
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC:\
-                returning M4WAR_NO_MORE_AU (silence)");
-            return M4WAR_NO_MORE_AU;
-        }
-        else /**< fatal error */
-        {
-            M4OSA_TRACE3_1(
-                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC: m_pFctGetNextAu() returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /* --------------------------------- */
-    /* ---------- MP3, others ---------- */
-    /* --------------------------------- */
-
-    else
-    {
-        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-            pClipCtxt->pReaderContext,
-            (M4_StreamHandler *)pClipCtxt->pAudioStream,
-            &pClipCtxt->AudioAU);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE3_1(
-                "M4VSS3GPP_intClipReadNextAudioFrame()-MP3: m_pFctGetNextAu() returns 0x%x",
-                err);
-            return err;
-        }
-
-        pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
-        pClipCtxt->uiAudioFrameSize = (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
-        pClipCtxt->iAudioFrameCts =
-            (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
-            + 0.5);
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intClipReadNextAudioFrame(): returning M4NO_ERROR");
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder()
- * @brief    Creates and initialize the audio decoder for the clip.
- * @note
- * @param   pClipCtxt        (IN) internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
-    M4VSS3GPP_ClipContext *pClipCtxt )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4_StreamType audiotype;
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-    M4_AACType iAacType = 0;
-
-#endif
-
-    /**
-    * Set the proper audio decoder */
-
-    audiotype = pClipCtxt->pAudioStream->m_basicProperties.m_streamType;
-
-    //EVRC
-    if( M4DA_StreamTypeAudioEvrc
-        != audiotype ) /* decoder not supported yet, but allow to do null encoding */
-
-        err = M4VSS3GPP_setCurrentAudioDecoder(&pClipCtxt->ShellAPI, audiotype);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Creates the audio decoder */
-    if( M4OSA_NULL == pClipCtxt->ShellAPI.m_pAudioDecoder )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intClipPrepareAudioDecoder(): Fails to initiate the audio decoder.");
-        return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
-    }
-
-    if( M4OSA_NULL == pClipCtxt->pAudioDecCtxt )
-    {
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-        if( M4OSA_TRUE == pClipCtxt->ShellAPI.bAllowFreeingOMXCodecInterface )
-        {
-            if( M4DA_StreamTypeAudioAac == audiotype ) {
-                err = M4VSS3GPP_intCheckAndGetCodecAacProperties(
-                       pClipCtxt);
-            } else if (M4DA_StreamTypeAudioPcm != audiotype) {
-                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
-                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
-                M4OSA_NULL);
-            } else {
-                err = M4NO_ERROR;
-            }
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intClipPrepareAudioDecoder: m_pAudioDecoder->m_pFctCreateAudioDec\
-                    returns 0x%x", err);
-                return err;
-            }
-        }
-        else
-        {
-            M4OSA_TRACE3_1(
-                "M4VSS3GPP_intClipPrepareAudioDecoder:\
-                Creating external audio decoder of type 0x%x", audiotype);
-            /* External OMX codecs are used*/
-            if( M4DA_StreamTypeAudioAac == audiotype )
-            {
-                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
-                    &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
-                    pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
-
-                if( M4NO_ERROR == err )
-                {
-                    /* AAC properties*/
-                    /*get from Reader; temporary, till Audio decoder shell API
-                      available to get the AAC properties*/
-                    pClipCtxt->AacProperties.aNumChan =
-                        pClipCtxt->pAudioStream->m_nbChannels;
-                    pClipCtxt->AacProperties.aSampFreq =
-                        pClipCtxt->pAudioStream->m_samplingFrequency;
-
-                    err = pClipCtxt->ShellAPI.m_pAudioDecoder->
-                        m_pFctGetOptionAudioDec(pClipCtxt->pAudioDecCtxt,
-                        M4AD_kOptionID_StreamType,
-                        (M4OSA_DataOption) &iAacType);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intClipPrepareAudioDecoder:\
-                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x", err);
-                        iAacType = M4_kAAC; //set to default
-                        err = M4NO_ERROR;
-                    }
-                    else {
-                        M4OSA_TRACE3_1(
-                        "M4VSS3GPP_intClipPrepareAudioDecoder: \
-                        m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
-                        iAacType);
-                       }
-                    switch( iAacType )
-                    {
-                        case M4_kAAC:
-                            pClipCtxt->AacProperties.aSBRPresent = 0;
-                            pClipCtxt->AacProperties.aPSPresent = 0;
-                            break;
-
-                        case M4_kAACplus:
-                            pClipCtxt->AacProperties.aSBRPresent = 1;
-                            pClipCtxt->AacProperties.aPSPresent = 0;
-                            pClipCtxt->AacProperties.aExtensionSampFreq =
-                                pClipCtxt->pAudioStream->m_samplingFrequency;
-                            break;
-
-                        case M4_keAACplus:
-                            pClipCtxt->AacProperties.aSBRPresent = 1;
-                            pClipCtxt->AacProperties.aPSPresent = 1;
-                            pClipCtxt->AacProperties.aExtensionSampFreq =
-                                pClipCtxt->pAudioStream->m_samplingFrequency;
-                            break;
-                        default:
-                            break;
-                    }
-                    M4OSA_TRACE3_2(
-                        "M4VSS3GPP_intClipPrepareAudioDecoder: AAC NBChans=%d, SamplFreq=%d",
-                        pClipCtxt->AacProperties.aNumChan,
-                        pClipCtxt->AacProperties.aSampFreq);
-                }
-            }
-            else
-                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
-                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
-                pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intClipPrepareAudioDecoder:\
-                    m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-
-#else
-        /* Trick, I use pUserData to retrieve aac properties,
-           waiting for some better implementation... */
-
-        if( M4DA_StreamTypeAudioAac == audiotype )
-            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
-            &pClipCtxt->pAudioDecCtxt,
-            pClipCtxt->pAudioStream, &(pClipCtxt->AacProperties));
-        else
-            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
-            &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
-            M4OSA_NULL /* to be changed with HW interfaces */);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipPrepareAudioDecoder:\
-                m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
-                err);
-            return err;
-        }
-
-#endif
-
-    }
-
-    if( M4DA_StreamTypeAudioAmrNarrowBand == audiotype ) {
-        /* AMR DECODER CONFIGURATION */
-
-        /* nothing specific to do */
-    }
-    else if( M4DA_StreamTypeAudioEvrc == audiotype ) {
-        /* EVRC DECODER CONFIGURATION */
-
-        /* nothing specific to do */
-    }
-    else if( M4DA_StreamTypeAudioMp3 == audiotype ) {
-        /* MP3 DECODER CONFIGURATION */
-
-        /* nothing specific to do */
-    }
-    else if( M4DA_StreamTypeAudioAac == audiotype )
-    {
-        /* AAC DECODER CONFIGURATION */
-
-        /* Decode high quality aac but disable PS and SBR */
-        /* Because we have to mix different kind of AAC so we must take the lowest capability */
-        /* In MCS it was not needed because there is only one stream */
-        M4_AacDecoderConfig AacDecParam;
-
-        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
-        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
-
-        if( M4ENCODER_kMono == pClipCtxt->pAudioStream->m_nbChannels )
-        {
-            AacDecParam.m_OutputMode = AAC_kMono;
-        }
-        else
-        {
-            AacDecParam.m_OutputMode = AAC_kStereo;
-        }
-
-        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
-            pClipCtxt->pAudioDecCtxt,
-            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
-    }
-
-    if( M4OSA_NULL != pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec ) {
-        pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
-         pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_3gpReaderInterface,
-         (M4OSA_DataOption) pClipCtxt->ShellAPI.m_pReaderDataIt);
-
-        pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
-         pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_AudioAU,
-         (M4OSA_DataOption) &pClipCtxt->AudioAU);
-    }
-
-    if( M4OSA_NULL != pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec )
-    {
-        /* Not implemented in all decoders */
-        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec(
-            pClipCtxt->pAudioDecCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipPrepareAudioDecoder:\
-                m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Allocate output buffer for the audio decoder */
-    pClipCtxt->AudioDecBufferOut.m_bufferSize =
-        pClipCtxt->pAudioStream->m_byteFrameLength
-        * pClipCtxt->pAudioStream->m_byteSampleSize
-        * pClipCtxt->pAudioStream->m_nbChannels;
-    pClipCtxt->AudioDecBufferOut.m_dataAddress =
-        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pClipCtxt->AudioDecBufferOut.m_bufferSize
-        * sizeof(M4OSA_Int16),
-        M4VSS3GPP, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
-
-    if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intClipPrepareAudioDecoder():\
-            unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
- * @brief    Decode the current AUDIO frame.
- * @note
- * @param   pClipCtxt        (IN) internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(
-    M4VSS3GPP_ClipContext *pClipCtxt )
-{
-    M4OSA_ERR err;
-
-    /**
-    * Silence mode */
-    if( pClipCtxt->pSilenceFrameData
-        == (M4OSA_UInt8 *)pClipCtxt->pAudioFramePtr )
-    {
-        if( pClipCtxt->AudioDecBufferOut.m_dataAddress == M4OSA_NULL )
-        {
-            /**
-            * Allocate output buffer for the audio decoder */
-            pClipCtxt->AudioDecBufferOut.m_bufferSize =
-                pClipCtxt->uiSilencePcmSize;
-            pClipCtxt->AudioDecBufferOut.m_dataAddress =
-                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(
-                pClipCtxt->AudioDecBufferOut.m_bufferSize
-                * sizeof(M4OSA_Int16),
-                M4VSS3GPP,(M4OSA_Char *) "AudioDecBufferOut.m_bufferSize");
-
-            if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
-            {
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
-                    unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
-                return M4ERR_ALLOC;
-            }
-        }
-
-        /* Fill it with 0 (= pcm silence) */
-        memset(pClipCtxt->AudioDecBufferOut.m_dataAddress,0,
-             pClipCtxt->AudioDecBufferOut.m_bufferSize * sizeof(M4OSA_Int16));
-    }
-    else if (pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM)
-    {
-        pClipCtxt->AudioDecBufferIn.m_dataAddress = (M4OSA_MemAddr8) pClipCtxt->pAudioFramePtr;
-        pClipCtxt->AudioDecBufferIn.m_bufferSize  = pClipCtxt->uiAudioFrameSize;
-
-        memcpy((void *)pClipCtxt->AudioDecBufferOut.m_dataAddress,
-            (void *)pClipCtxt->AudioDecBufferIn.m_dataAddress, pClipCtxt->AudioDecBufferIn.m_bufferSize);
-        pClipCtxt->AudioDecBufferOut.m_bufferSize = pClipCtxt->AudioDecBufferIn.m_bufferSize;
-        /**
-        * Return with no error */
-
-        M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
-        return M4NO_ERROR;
-    }
-    /**
-    * Standard decoding mode */
-    else
-    {
-        /**
-        * Decode current AMR frame */
-        if ( pClipCtxt->pAudioFramePtr != M4OSA_NULL ) {
-            pClipCtxt->AudioDecBufferIn.m_dataAddress =
-             (M4OSA_MemAddr8)pClipCtxt->pAudioFramePtr;
-            pClipCtxt->AudioDecBufferIn.m_bufferSize =
-             pClipCtxt->uiAudioFrameSize;
-            pClipCtxt->AudioDecBufferIn.m_timeStampUs =
-             (int64_t) (pClipCtxt->iAudioFrameCts * 1000LL);
-
-            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
-             pClipCtxt->pAudioDecCtxt,
-             &pClipCtxt->AudioDecBufferIn, &pClipCtxt->AudioDecBufferOut,
-             M4OSA_FALSE);
-        } else {
-            // Pass Null input buffer
-            // Reader invoked from Audio decoder source
-            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
-             pClipCtxt->pAudioDecCtxt,
-             M4OSA_NULL, &pClipCtxt->AudioDecBufferOut,
-             M4OSA_FALSE);
-        }
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
-                m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
- * @brief    Jump in the audio track of the clip.
- * @note
- * @param   pClipCtxt            (IN) internal clip context
- * @param   pJumpCts            (IN/OUT) in:target CTS, out: reached CTS
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt( M4VSS3GPP_ClipContext *pClipCtxt,
-                                       M4OSA_Int32 *pJumpCts )
-{
-    M4OSA_ERR err;
-    M4OSA_Int32 iTargetCts;
-    M4OSA_Int32 iJumpCtsMs;
-
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipJumpAudioAt: pClipCtxt is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pJumpCts), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipJumpAudioAt: pJumpCts is M4OSA_NULL");
-
-    iTargetCts = *pJumpCts;
-
-    /**
-    * If there is no audio stream, we simulate a jump at the target jump CTS */
-    if( M4OSA_NULL == pClipCtxt->pAudioStream )
-    {
-        /**
-        * the target CTS will be reached at next ReadFrame call (thus the -20) */
-        *pJumpCts = iTargetCts - pClipCtxt->iSilenceFrameDuration;
-
-        /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
-        /* (cts is not an integer with frequency 24 kHz for example) */
-        *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
-            / pClipCtxt->iSilenceFrameDuration)
-            * pClipCtxt->iSilenceFrameDuration;
-        pClipCtxt->iAudioFrameCts =
-            *
-            pJumpCts; /* simulate a read at jump position for later silence AUs */
-    }
-    else
-    {
-        M4OSA_Int32 current_time = 0;
-        M4OSA_Int32 loop_counter = 0;
-
-        if( (M4DA_StreamTypeAudioMp3
-            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType) )
-        {
-            while( ( loop_counter < M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX)
-                && (current_time < iTargetCts) )
-            {
-                err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-                    pClipCtxt->pReaderContext,
-                    (M4_StreamHandler *)pClipCtxt->pAudioStream,
-                    &pClipCtxt->AudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE3_1(
-                        "M4VSS3GPP_intClipJumpAudioAt: m_pFctGetNextAu() returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                current_time = (M4OSA_Int32)pClipCtxt->AudioAU.m_CTS;
-                loop_counter++;
-            }
-
-            /**
-            * The current AU is stored */
-            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
-            pClipCtxt->uiAudioFrameSize =
-                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
-            pClipCtxt->iAudioFrameCts =
-                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
-                + 0.5);
-
-            *pJumpCts = pClipCtxt->iAudioFrameCts;
-        }
-        else
-        {
-            /**
-            * Jump in the audio stream */
-            iJumpCtsMs =
-                (M4OSA_Int32)(*pJumpCts / pClipCtxt->scale_audio + 0.5);
-
-            err = pClipCtxt->ShellAPI.m_pReader->m_pFctJump(
-                pClipCtxt->pReaderContext,
-                (M4_StreamHandler *)pClipCtxt->pAudioStream,
-                &iJumpCtsMs);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intClipJumpAudioAt(): m_pFctJump() returns 0x%x",
-                    err);
-                return err;
-            }
-
-            *pJumpCts =
-                (M4OSA_Int32)(iJumpCtsMs * pClipCtxt->scale_audio + 0.5);
-
-            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
-            /* (cts is not an integer with frequency 24 kHz for example) */
-            *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
-                / pClipCtxt->iSilenceFrameDuration)
-                * pClipCtxt->iSilenceFrameDuration;
-            pClipCtxt->iAudioFrameCts = 0; /* No frame read yet */
-
-            /**
-            * To detect some may-be bugs, I prefer to reset all these after a jump */
-            pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
-            pClipCtxt->pAudioFramePtr = M4OSA_NULL;
-
-            /**
-            * In AMR, we have to manage multi-framed AUs,
-            but also in AAC the jump can be 1 AU too much backward */
-            if( *pJumpCts < iTargetCts )
-            {
-                /**
-                * Jump doesn't read any AU, we must read at least one */
-                err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
-
-                if( M4OSA_ERR_IS_ERROR(err) )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intClipJumpAudioAt():\
-                        M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Read AU frames as long as we reach the AU before the target CTS
-                * (so the target will be reached when the user call ReadNextAudioFrame). */
-                while( pClipCtxt->iAudioFrameCts
-                    < (iTargetCts - pClipCtxt->iSilenceFrameDuration) )
-                {
-                    err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
-
-                    if( M4OSA_ERR_IS_ERROR(err) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intClipJumpAudioAt():\
-                            M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Return the CTS that will be reached at next ReadFrame */
-                *pJumpCts = pClipCtxt->iAudioFrameCts
-                    + pClipCtxt->iSilenceFrameDuration;
-            }
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intClipJumpAudioAt(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipClose()
- * @brief    Close a clip. Destroy the context.
- * @note
- * @param   pClipCtxt            (IN) Internal clip context
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intClipClose( M4VSS3GPP_ClipContext *pClipCtxt )
-{
-    M4OSA_ERR err;
-
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipClose: pClipCtxt is M4OSA_NULL");
-
-    /**
-    * Free the video decoder context */
-    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
-    {
-        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
-            pClipCtxt->pViDecCtxt);
-        pClipCtxt->pViDecCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Free the audio decoder context  */
-    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
-    {
-        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
-            pClipCtxt->pAudioDecCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipClose: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Free the decoded audio buffer */
-    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
-    {
-        free(pClipCtxt->AudioDecBufferOut.m_dataAddress);
-        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
-    }
-
-    /**
-    * Audio AU is allocated by reader.
-    * If no audio track, audio AU is set at 'silent' (SID) by VSS.
-    * As a consequence, if audio AU is set to 'silent' (static)
-    it can't be free unless it is set to NULL */
-    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
-        == pClipCtxt->AudioAU.m_dataAddress)
-        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
-        == pClipCtxt->AudioAU.m_dataAddress) )
-    {
-        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pClipCtxt->pReaderContext )
-    {
-        /**
-        * Close the 3GPP or MP3 reader */
-        err = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
-            pClipCtxt->pReaderContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctClose returns 0x%x",
-                err);
-        }
-
-        /**
-        * Destroy the 3GPP or MP3 reader context */
-        err = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
-            pClipCtxt->pReaderContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctDestroy returns 0x%x",
-                err);
-        }
-
-        pClipCtxt->pReaderContext = M4OSA_NULL;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_1("M4VSS3GPP_intClipClose(Ctxt=0x%x): returning M4NO_ERROR",
-        pClipCtxt);
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR M4VSS3GPP_intClipCleanUp( M4VSS3GPP_ClipContext *pClipCtxt )
-{
-    M4OSA_ERR err = M4NO_ERROR, err2;
-
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
-        "M4VSS3GPP_intClipCleanUp: pClipCtxt is M4OSA_NULL");
-
-    /**
-    * Free the video decoder context */
-    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
-    {
-        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
-            pClipCtxt->pViDecCtxt);
-        pClipCtxt->pViDecCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Free the audio decoder context  */
-    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
-    {
-        err2 = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
-            pClipCtxt->pAudioDecCtxt);
-
-        if( M4NO_ERROR != err2 )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipCleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-            if( M4NO_ERROR != err )
-                err = err2;
-        }
-
-        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Free the decoded audio buffer */
-    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
-    {
-        free(pClipCtxt->AudioDecBufferOut.m_dataAddress);
-        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
-    }
-
-    /**
-    * Audio AU is allocated by reader.
-    * If no audio track, audio AU is set at 'silent' (SID) by VSS.
-    * As a consequence, if audio AU is set to 'silent' (static)
-    it can't be free unless it is set to NULL */
-    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
-        == pClipCtxt->AudioAU.m_dataAddress)
-        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
-        == pClipCtxt->AudioAU.m_dataAddress) )
-    {
-        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pClipCtxt->pReaderContext )
-    {
-        /**
-        * Close the 3GPP or MP3 reader */
-        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
-            pClipCtxt->pReaderContext);
-
-        if( M4NO_ERROR != err2 )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctClose returns 0x%x",
-                err);
-
-            if( M4NO_ERROR != err )
-                err = err2;
-        }
-
-        /**
-        * Destroy the 3GPP or MP3 reader context */
-        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
-            pClipCtxt->pReaderContext);
-
-        if( M4NO_ERROR != err2 )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctDestroy returns 0x%x",
-                err);
-
-            if( M4NO_ERROR != err )
-                err = err2;
-        }
-
-        pClipCtxt->pReaderContext = M4OSA_NULL;
-    }
-
-    if(pClipCtxt->pPlaneYuv != M4OSA_NULL) {
-        if(pClipCtxt->pPlaneYuv[0].pac_data != M4OSA_NULL) {
-            free(pClipCtxt->pPlaneYuv[0].pac_data);
-            pClipCtxt->pPlaneYuv[0].pac_data = M4OSA_NULL;
-        }
-        free(pClipCtxt->pPlaneYuv);
-        pClipCtxt->pPlaneYuv = M4OSA_NULL;
-    }
-
-    if(pClipCtxt->pPlaneYuvWithEffect != M4OSA_NULL) {
-        if(pClipCtxt->pPlaneYuvWithEffect[0].pac_data != M4OSA_NULL) {
-            free(pClipCtxt->pPlaneYuvWithEffect[0].pac_data);
-            pClipCtxt->pPlaneYuvWithEffect[0].pac_data = M4OSA_NULL;
-        }
-        free(pClipCtxt->pPlaneYuvWithEffect);
-        pClipCtxt->pPlaneYuvWithEffect = M4OSA_NULL;
-    }
-    /**
-    * Free the shells interfaces */
-    M4VSS3GPP_unRegisterAllWriters(&pClipCtxt->ShellAPI);
-    M4VSS3GPP_unRegisterAllEncoders(&pClipCtxt->ShellAPI);
-    M4VSS3GPP_unRegisterAllReaders(&pClipCtxt->ShellAPI);
-    M4VSS3GPP_unRegisterAllDecoders(&pClipCtxt->ShellAPI);
-
-    M4OSA_TRACE3_1("M4VSS3GPP_intClipCleanUp: pClipCtxt=0x%x", pClipCtxt);
-    /**
-    * Free the clip context */
-    free(pClipCtxt);
-
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
- * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
- * @note
- * @param   pAudioFrame   (IN) AMRNB frame
- * @return  M4NO_ERROR: No error
- ******************************************************************************
- */
-
-M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
-{
-    M4OSA_UInt32 frameSize = 0;
-    M4OSA_UInt32 frameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
-
-    switch( frameType )
-    {
-        case 0:
-            frameSize = 95;
-            break; /*  4750 bps */
-
-        case 1:
-            frameSize = 103;
-            break; /*  5150 bps */
-
-        case 2:
-            frameSize = 118;
-            break; /*  5900 bps */
-
-        case 3:
-            frameSize = 134;
-            break; /*  6700 bps */
-
-        case 4:
-            frameSize = 148;
-            break; /*  7400 bps */
-
-        case 5:
-            frameSize = 159;
-            break; /*  7950 bps */
-
-        case 6:
-            frameSize = 204;
-            break; /* 10200 bps */
-
-        case 7:
-            frameSize = 244;
-            break; /* 12000 bps */
-
-        case 8:
-            frameSize = 39;
-            break; /* SID (Silence) */
-
-        case 15:
-            frameSize = 0;
-            break; /* No data */
-
-        default:
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
-            return 0;
-    }
-
-    return (1 + (( frameSize + 7) / 8));
-}
-
-/**
- ******************************************************************************
- * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
- * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
- * @note
- *     0 1 2 3
- *    +-+-+-+-+
- *    |fr type|              RFC 3558
- *    +-+-+-+-+
- *
- * Frame Type: 4 bits
- *    The frame type indicates the type of the corresponding codec data
- *    frame in the RTP packet.
- *
- * For EVRC and SMV codecs, the frame type values and size of the
- * associated codec data frame are described in the table below:
- *
- * Value   Rate      Total codec data frame size (in octets)
- * ---------------------------------------------------------
- *   0     Blank      0    (0 bit)
- *   1     1/8        2    (16 bits)
- *   2     1/4        5    (40 bits; not valid for EVRC)
- *   3     1/2       10    (80 bits)
- *   4     1         22    (171 bits; 5 padded at end with zeros)
- *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
- *
- * @param   pCpAudioFrame   (IN) EVRC frame
- * @return  M4NO_ERROR: No error
- ******************************************************************************
- */
-M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
-{
-    M4OSA_UInt32 frameSize = 0;
-    M4OSA_UInt32 frameType = ( *pAudioFrame) &0x0F;
-
-    switch( frameType )
-    {
-        case 0:
-            frameSize = 0;
-            break; /*  blank */
-
-        case 1:
-            frameSize = 16;
-            break; /*  1/8 */
-
-        case 2:
-            frameSize = 40;
-            break; /*  1/4 */
-
-        case 3:
-            frameSize = 80;
-            break; /*  1/2 */
-
-        case 4:
-            frameSize = 171;
-            break; /*  1 */
-
-        case 5:
-            frameSize = 0;
-            break; /*  erasure */
-
-        default:
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
-            return 0;
-    }
-
-    return (1 + (( frameSize + 7) / 8));
-}
-
-M4OSA_ERR M4VSS3GPP_intCheckAndGetCodecAacProperties(
-                                 M4VSS3GPP_ClipContext *pClipCtxt) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4AD_Buffer outputBuffer;
-    uint32_t optionValue =0;
-
-    // Decode first audio frame from clip to get properties from codec
-
-    err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
-                    &pClipCtxt->pAudioDecCtxt,
-                    pClipCtxt->pAudioStream, &(pClipCtxt->AacProperties));
-
-    pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
-     pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_3gpReaderInterface,
-     (M4OSA_DataOption) pClipCtxt->ShellAPI.m_pReaderDataIt);
-
-    pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
-     pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_AudioAU,
-     (M4OSA_DataOption) &pClipCtxt->AudioAU);
-
-    if( pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL ) {
-
-        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec(
-         pClipCtxt->pAudioDecCtxt);
-        if( M4NO_ERROR != err ) {
-
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCheckAndGetCodecAacProperties: \
-                 m_pFctStartAudioDec returns 0x%x", err);
-            return err;
-        }
-    }
-
-    /**
-    * Allocate output buffer for the audio decoder */
-    outputBuffer.m_bufferSize =
-        pClipCtxt->pAudioStream->m_byteFrameLength
-        * pClipCtxt->pAudioStream->m_byteSampleSize
-        * pClipCtxt->pAudioStream->m_nbChannels;
-
-    if( outputBuffer.m_bufferSize > 0 ) {
-
-        outputBuffer.m_dataAddress =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(outputBuffer.m_bufferSize \
-            *sizeof(short), M4VSS3GPP, (M4OSA_Char *)"outputBuffer.m_bufferSize");
-
-        if( M4OSA_NULL == outputBuffer.m_dataAddress ) {
-
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intCheckAndGetCodecAacProperties():\
-                 unable to allocate outputBuffer.m_dataAddress");
-            return M4ERR_ALLOC;
-        }
-    }
-
-    err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
-            pClipCtxt->pAudioDecCtxt, M4OSA_NULL, &outputBuffer, M4OSA_FALSE);
-
-    if ( err == M4WAR_INFO_FORMAT_CHANGE ) {
-
-        // Get the properties from codec node
-        pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
-         pClipCtxt->pAudioDecCtxt,
-           M4AD_kOptionID_AudioNbChannels, (M4OSA_DataOption) &optionValue);
-
-        pClipCtxt->AacProperties.aNumChan = optionValue;
-        // Reset Reader structure value also
-        pClipCtxt->pAudioStream->m_nbChannels = optionValue;
-
-        pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
-         pClipCtxt->pAudioDecCtxt,
-          M4AD_kOptionID_AudioSampFrequency, (M4OSA_DataOption) &optionValue);
-
-        pClipCtxt->AacProperties.aSampFreq = optionValue;
-        // Reset Reader structure value also
-        pClipCtxt->pAudioStream->m_samplingFrequency = optionValue;
-
-    } else if( err != M4NO_ERROR) {
-        M4OSA_TRACE1_1("M4VSS3GPP_intCheckAndGetCodecAacProperties:\
-            m_pFctStepAudioDec returns err = 0x%x", err);
-    }
-
-    free(outputBuffer.m_dataAddress);
-
-    // Reset the stream reader
-    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
-     pClipCtxt->pReaderContext,
-     (M4_StreamHandler *)pClipCtxt->pAudioStream);
-
-    if (M4NO_ERROR != err) {
-        M4OSA_TRACE1_1("M4VSS3GPP_intCheckAndGetCodecAacProperties\
-            Error in reseting reader: 0x%x", err);
-    }
-
-    return err;
-
-}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
deleted file mode 100755
index e2c6d7a..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
+++ /dev/null
@@ -1,1032 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_ClipAnalysis.c
- * @brief    Implementation of functions related to analysis of input clips
- * @note    All functions in this file are static, i.e. non public
- ******************************************************************************
- */
-
-/****************/
-/*** Includes ***/
-/****************/
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- *    Our headers */
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-#include "M4VSS3GPP_InternalTypes.h"
-#include "M4VSS3GPP_InternalFunctions.h"
-#include "M4VSS3GPP_InternalConfig.h"
-#include "M4VD_EXTERNAL_Interface.h"
-
-
-/**
- *    OSAL headers */
-#include "M4OSA_Memory.h" /* OSAL memory management */
-#include "M4OSA_Debug.h"  /* OSAL debug management */
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
- * @brief    This function allows checking if a clip is compatible with VSS 3GPP editing
- * @note    It also fills a ClipAnalysis structure, which can be used to check if two
- *        clips are compatible
- * @param    pClip                (IN) File descriptor of the input 3GPP/MP3 clip file.
- * @param    pClipProperties        (IN) Pointer to a valid ClipProperties structure.
- * @param    FileType            (IN) Type of the input file (.3gp, .amr, .mp3)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return   M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
- * @return   M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
- * @return   M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
- * @return   M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
- * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
- * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
- * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editAnalyseClip( M4OSA_Void *pClip,
-                                    M4VIDEOEDITING_FileType FileType,
-                                    M4VIDEOEDITING_ClipProperties *pClipProperties,
-                                    M4OSA_FileReadPointer *pFileReadPtrFct )
-{
-    M4OSA_ERR err;
-    M4VSS3GPP_ClipContext *pClipContext;
-    M4VSS3GPP_ClipSettings ClipSettings;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_editAnalyseClip called with pClip=0x%x, pClipProperties=0x%x",
-        pClip, pClipProperties);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip), M4ERR_PARAMETER,
-        "M4VSS3GPP_editAnalyseClip: pClip is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipProperties), M4ERR_PARAMETER,
-        "M4VSS3GPP_editAnalyseClip: pClipProperties is M4OSA_NULL");
-
-    /**
-    * Build dummy clip settings, in order to use the editClipOpen function */
-    ClipSettings.pFile = pClip;
-    ClipSettings.FileType = FileType;
-    ClipSettings.uiBeginCutTime = 0;
-    ClipSettings.uiEndCutTime = 0;
-
-    /* Clip properties not build yet, set at least this flag */
-    ClipSettings.ClipProperties.bAnalysed = M4OSA_FALSE;
-
-    /**
-    * Open the clip in fast open mode */
-    err = M4VSS3GPP_intClipInit(&pClipContext, pFileReadPtrFct);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipInit() returns 0x%x!",
-            err);
-
-        /**
-        * Free the clip */
-        if( M4OSA_NULL != pClipContext )
-        {
-            M4VSS3GPP_intClipCleanUp(pClipContext);
-        }
-        return err;
-    }
-
-    err = M4VSS3GPP_intClipOpen(pClipContext, &ClipSettings, M4OSA_FALSE,
-        M4OSA_TRUE, M4OSA_TRUE);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipOpen() returns 0x%x!",
-            err);
-
-        M4VSS3GPP_intClipCleanUp(pClipContext);
-
-        /**
-        * Here it is better to return the Editing specific error code */
-        if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
-            || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editAnalyseClip:\
-                M4VSS3GPP_intClipOpen() returns M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
-            return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
-        }
-        return err;
-    }
-
-    /**
-    * Analyse the clip */
-    if(M4VIDEOEDITING_kFileType_ARGB8888 != pClipContext->pSettings->FileType) {
-        err = M4VSS3GPP_intBuildAnalysis(pClipContext, pClipProperties);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intBuildAnalysis() returns 0x%x!",
-                err);
-
-            /**
-            * Free the clip */
-            M4VSS3GPP_intClipCleanUp(pClipContext);
-            return err;
-        }
-    }
-    /**
-    * Free the clip */
-    err = M4VSS3GPP_intClipClose(pClipContext);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editAnalyseClip: M4VSS_intClipClose() returns 0x%x!",
-            err);
-        M4VSS3GPP_intClipCleanUp(pClipContext);
-        return err;
-    }
-
-    M4VSS3GPP_intClipCleanUp(pClipContext);
-
-    /**
-    * Check the clip is compatible with VSS editing */
-    if(M4VIDEOEDITING_kFileType_ARGB8888 != ClipSettings.FileType) {
-        err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClipProperties);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editAnalyseClip:\
-                M4VSS3GPP_intCheckClipCompatibleWithVssEditing() returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_editAnalyseClip(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
- * @brief    This function allows checking if two clips are compatible with each other for
- *        VSS 3GPP editing assembly feature.
- * @note
- * @param    pClip1Properties        (IN) Clip analysis of the first clip
- * @param    pClip2Properties        (IN) Clip analysis of the second clip
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
- * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
- * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
- * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
- * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
- * @return  M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
- * @return  M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility( M4VIDEOEDITING_ClipProperties *pClip1Properties,
-                                                M4VIDEOEDITING_ClipProperties *pClip2Properties )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_ERR video_err = M4NO_ERROR;
-    M4OSA_ERR audio_err = M4NO_ERROR;
-
-    M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
-    M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
-
-    M4OSA_TRACE3_2("M4VSS3GPP_editCheckClipCompatibility called with pClip1Analysis=0x%x,\
-                   pClip2Analysis=0x%x", pClip1Properties, pClip2Properties);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip1Properties), M4ERR_PARAMETER,
-        "M4VSS3GPP_editCheckClipCompatibility: pClip1Properties is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip2Properties), M4ERR_PARAMETER,
-        "M4VSS3GPP_editCheckClipCompatibility: pClip2Properties is M4OSA_NULL");
-
-    if( ( M4VIDEOEDITING_kFileType_MP3 == pClip1Properties->FileType)
-        || (M4VIDEOEDITING_kFileType_AMR == pClip1Properties->FileType) )
-    {
-        if( pClip1Properties != pClip2Properties )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editCheckClipCompatibility: MP3 CAN ONLY BE CUT,\
-                returning M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
-            return M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY;
-        }
-        else
-        {
-            /* We are in VSS Splitter mode */
-            goto audio_analysis;
-        }
-    }
-
-    /********** Audio ************/
-
-audio_analysis:
-    if( M4VIDEOEDITING_kNoneAudio != pClip1Properties->
-        AudioStreamType ) /**< if there is an audio stream */
-    {
-        /**
-        * Check audio format is AAC */
-        switch( pClip1Properties->AudioStreamType )
-        {
-            case M4VIDEOEDITING_kAAC:
-            case M4VIDEOEDITING_kAACplus:
-            case M4VIDEOEDITING_keAACplus:
-                bClip1IsAAC = M4OSA_TRUE;
-                break;
-            default:
-                break;
-        }
-    }
-
-    if( M4VIDEOEDITING_kNoneAudio != pClip2Properties->
-        AudioStreamType ) /**< if there is an audio stream */
-    {
-        /**
-        * Check audio format is AAC */
-        switch( pClip2Properties->AudioStreamType )
-        {
-            case M4VIDEOEDITING_kAAC:
-            case M4VIDEOEDITING_kAACplus:
-            case M4VIDEOEDITING_keAACplus:
-                bClip2IsAAC = M4OSA_TRUE;
-                break;
-            default:
-                break;
-        }
-    }
-
-    /**
-    * If there is no audio, the clips are compatibles ... */
-    if( ( pClip1Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio)
-        && (pClip2Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio) )
-    {
-        /**
-        * Check both clips have same audio stream type
-        * And let_s say AAC, AAC+ and eAAC+ are mixable */
-        if( ( pClip1Properties->AudioStreamType
-            != pClip2Properties->AudioStreamType)
-            && (( M4OSA_FALSE == bClip1IsAAC) || (M4OSA_FALSE == bClip2IsAAC)) )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editCheckClipCompatibility:\
-                Clips don't have the same Audio Stream Type");
-
-            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE;
-            goto analysis_done;
-        }
-
-        /**
-        * Check both clips have same number of channels */
-        if( pClip1Properties->uiNbChannels != pClip2Properties->uiNbChannels )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same Nb of Channels");
-            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
-            goto analysis_done;
-        }
-
-        /**
-        * Check both clips have same sampling frequency */
-        if( pClip1Properties->uiSamplingFrequency
-            != pClip2Properties->uiSamplingFrequency )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editCheckClipCompatibility:\
-                Clips don't have the same Sampling Frequency");
-            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
-            goto analysis_done;
-        }
-    }
-
-    pClip2Properties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
-
-    /**
-    * Return with no error */
-
-analysis_done:
-    if( video_err != M4NO_ERROR )
-        return video_err;
-
-    if( audio_err != M4NO_ERROR )
-        return audio_err;
-
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_editCheckClipCompatibility(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
- * @brief    Get video and audio properties from the clip streams
- * @note    This function must return fatal errors only (errors that should not happen
- *        in the final integrated product).
- * @param   pClipCtxt            (IN) internal clip context
- * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure.
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intBuildAnalysis( M4VSS3GPP_ClipContext *pClipCtxt,
-                                     M4VIDEOEDITING_ClipProperties *pClipProperties )
-{
-    M4OSA_ERR err;
-    M4DECODER_MPEG4_DecoderConfigInfo DecConfigInfo;
-    M4DECODER_VideoSize dummySize;
-    M4DECODER_AVCProfileLevel AVCProfle;
-
-    pClipProperties->bAnalysed = M4OSA_FALSE;
-
-    /**
-    * Reset video characteristics */
-    pClipProperties->VideoStreamType = M4VIDEOEDITING_kNoneVideo;
-    pClipProperties->uiClipVideoDuration = 0;
-    pClipProperties->uiVideoBitrate = 0;
-    pClipProperties->uiVideoMaxAuSize = 0;
-    pClipProperties->uiVideoWidth = 0;
-    pClipProperties->uiVideoHeight = 0;
-    pClipProperties->uiVideoTimeScale = 0;
-    pClipProperties->fAverageFrameRate = 0.0;
-    pClipProperties->uiVideoProfile =
-        M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-    pClipProperties->uiVideoLevel =
-        M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-    pClipProperties->bMPEG4dataPartition = M4OSA_FALSE;
-    pClipProperties->bMPEG4rvlc = M4OSA_FALSE;
-    pClipProperties->bMPEG4resynchMarker = M4OSA_FALSE;
-
-    memset((void *) &pClipProperties->ftyp,0,
-        sizeof(pClipProperties->ftyp));
-
-    /**
-    * Video Analysis */
-    if( M4OSA_NULL != pClipCtxt->pVideoStream )
-    {
-        pClipProperties->uiVideoWidth = pClipCtxt->pVideoStream->m_videoWidth;
-        pClipProperties->uiVideoHeight = pClipCtxt->pVideoStream->m_videoHeight;
-        pClipProperties->fAverageFrameRate =
-            pClipCtxt->pVideoStream->m_averageFrameRate;
-
-        switch( pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
-        {
-            case M4DA_StreamTypeVideoMpeg4:
-
-                pClipProperties->VideoStreamType = M4VIDEOEDITING_kMPEG4;
-
-   /* This issue is so incredibly stupid that it's depressing. Basically, a file can be analysed
-   outside of any context (besides that of the clip itself), so that for instance two clips can
-   be checked for compatibility before allocating an edit context for editing them. But this
-   means there is no way in heck to pass an external video decoder (to begin with) to this
-   function, as they work by being registered in an existing context; furthermore, it is actually
-   pretty overkill to use a full decoder for that, moreso a HARDWARE decoder just to get the
-   clip config info. In fact, the hardware itself doesn't provide this service, in the case of a
-   HW decoder, the shell builds the config info itself, so we don't need the actual decoder, only
-   a detached functionality of it. So in case HW/external decoders may be present, we instead use
-   directly the DSI parsing function of the shell HW decoder (which we know to be present, since
-   HW decoders are possible) to get the config info. Notice this function is used even if the
-   software decoder is actually present and even if it will end up being actually used: figuring
-   out the config does not involve actual decoding nor the particularities of a specific decoder,
-   it's the fact that it's MPEG4 that matters, so it should not be functionally any different
-   from the way it was done before (and it's light enough for performance not to be any problem
-         whatsoever). */
-
-                err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
-                            m_basicProperties.m_pDecoderSpecificInfo,
-                            pClipCtxt->pVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
-                            &DecConfigInfo, &dummySize);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intBuildAnalysis():\
-                        M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X", err);
-                    return err;
-                }
-
-                pClipProperties->uiVideoTimeScale =
-                    DecConfigInfo.uiTimeScale;
-                pClipProperties->bMPEG4dataPartition =
-                    DecConfigInfo.bDataPartition;
-                pClipProperties->bMPEG4rvlc =
-                    DecConfigInfo.bUseOfRVLC;
-                pClipProperties->bMPEG4resynchMarker =
-                    DecConfigInfo.uiUseOfResynchMarker;
-                err = getMPEG4ProfileAndLevel(DecConfigInfo.uiProfile,
-                            &(pClipProperties->uiVideoProfile),
-                            &(pClipProperties->uiVideoLevel));
-               if (M4NO_ERROR != err) {
-                    M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): \
-                         getMPEG4ProfileAndLevel returns 0x%08X", err);
-                    return err;
-                }
-                break;
-
-            case M4DA_StreamTypeVideoH263:
-
-                pClipProperties->VideoStreamType = M4VIDEOEDITING_kH263;
-                /* H263 time scale is always 30000 */
-                pClipProperties->uiVideoTimeScale = 30000;
-
-                err = getH263ProfileAndLevel(pClipCtxt->pVideoStream->
-                            m_basicProperties.m_pDecoderSpecificInfo,
-                            pClipCtxt->pVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
-                            &pClipProperties->uiVideoProfile,
-                            &pClipProperties->uiVideoLevel);
-                if (M4NO_ERROR != err) {
-                    M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): \
-                         getH263ProfileAndLevel returns 0x%08X", err);
-                    return err;
-                }
-                break;
-
-            case M4DA_StreamTypeVideoMpeg4Avc:
-
-                pClipProperties->VideoStreamType = M4VIDEOEDITING_kH264;
-                err = getAVCProfileAndLevel(pClipCtxt->pVideoStream->
-                            m_basicProperties.m_pDecoderSpecificInfo,
-                            pClipCtxt->pVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
-                            &pClipProperties->uiVideoProfile,
-                            &pClipProperties->uiVideoLevel);
-                if (M4NO_ERROR != err) {
-                    M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): \
-                         getAVCProfileAndLevel returns 0x%08X", err);
-                    return err;
-                }
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intBuildAnalysis: unknown input video format (0x%x),\
-                     returning M4NO_ERROR",
-                    pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
-
-                 /** We do not return error here.
-                   *  The video format compatibility check will be done latter */
-                return M4NO_ERROR;
-        }
-
-        pClipProperties->uiClipVideoDuration =
-            (M4OSA_UInt32)pClipCtxt->pVideoStream->m_basicProperties.m_duration;
-        pClipProperties->uiVideoMaxAuSize =
-            pClipCtxt->pVideoStream->m_basicProperties.m_maxAUSize;
-
-        /* if video bitrate not available retrieve an estimation of the overall bitrate */
-        pClipProperties->uiVideoBitrate =
-            (M4OSA_UInt32)pClipCtxt->pVideoStream->
-            m_basicProperties.m_averageBitRate;
-
-        if( 0 == pClipProperties->uiVideoBitrate )
-        {
-            pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
-                pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
-                &pClipProperties->uiVideoBitrate);
-
-            if( M4OSA_NULL != pClipCtxt->pAudioStream )
-            {
-                /* we get the overall bitrate, substract the audio bitrate if any */
-                pClipProperties->uiVideoBitrate -=
-                    pClipCtxt->pAudioStream->m_basicProperties.m_averageBitRate;
-            }
-        }
-    }
-
-    /**
-    * Reset audio characteristics */
-    pClipProperties->AudioStreamType = M4VIDEOEDITING_kNoneAudio;
-    pClipProperties->uiClipAudioDuration = 0;
-    pClipProperties->uiAudioBitrate = 0;
-    pClipProperties->uiAudioMaxAuSize = 0;
-    pClipProperties->uiNbChannels = 0;
-    pClipProperties->uiSamplingFrequency = 0;
-    pClipProperties->uiExtendedSamplingFrequency = 0;
-    pClipProperties->uiDecodedPcmSize = 0;
-
-    /**
-    * Audio Analysis */
-    if( M4OSA_NULL != pClipCtxt->pAudioStream )
-    {
-        switch( pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
-        {
-            case M4DA_StreamTypeAudioAmrNarrowBand:
-
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAMR_NB;
-                break;
-
-            case M4DA_StreamTypeAudioAac:
-
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAAC;
-                break;
-
-            case M4DA_StreamTypeAudioMp3:
-
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_kMP3;
-                break;
-
-            case M4DA_StreamTypeAudioEvrc:
-
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_kEVRC;
-                break;
-
-            case M4DA_StreamTypeAudioPcm:
-
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_kPCM;
-                break;
-
-            default:
-
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intBuildAnalysis: unknown input audio format (0x%x),\
-                    returning M4NO_ERROR!",
-                    pClipCtxt->pAudioStream->m_basicProperties.m_streamType);
-                return
-                    M4NO_ERROR; /**< We do not return error here.
-                                The audio format compatibility check will be done latter */
-        }
-
-        pClipProperties->uiAudioMaxAuSize =
-            pClipCtxt->pAudioStream->m_basicProperties.m_maxAUSize;
-        pClipProperties->uiClipAudioDuration =
-            (M4OSA_UInt32)pClipCtxt->pAudioStream->m_basicProperties.m_duration;
-
-        pClipProperties->uiNbChannels = pClipCtxt->pAudioStream->m_nbChannels;
-        pClipProperties->uiSamplingFrequency =
-            pClipCtxt->pAudioStream->m_samplingFrequency;
-        pClipProperties->uiDecodedPcmSize =
-            pClipCtxt->pAudioStream->m_byteFrameLength
-            * pClipCtxt->pAudioStream->m_byteSampleSize
-            * pClipCtxt->pAudioStream->m_nbChannels;
-
-        /**
-        * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps
-        according the GetProperties function */
-        pClipProperties->uiAudioBitrate =
-            (M4OSA_UInt32)pClipCtxt->pAudioStream->
-            m_basicProperties.m_averageBitRate;
-
-        if( 0 == pClipProperties->uiAudioBitrate )
-        {
-            if( M4VIDEOEDITING_kAMR_NB == pClipProperties->AudioStreamType )
-            {
-                /**
-                *Better returning a guessed 12.2 kbps value than a sure-to-be-false 0 kbps value!*/
-                pClipProperties->uiAudioBitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
-            }
-            else if( M4VIDEOEDITING_kEVRC == pClipProperties->AudioStreamType )
-            {
-                /**
-                *Better returning a guessed 9.2 kbps value than a sure-to-be-false 0 kbps value!*/
-                pClipProperties->uiAudioBitrate =
-                    M4VSS3GPP_EVRC_DEFAULT_BITRATE;
-            }
-            else
-            {
-                pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
-                    pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
-                    &pClipProperties->uiAudioBitrate);
-
-                if( M4OSA_NULL != pClipCtxt->pVideoStream )
-                {
-                    /* we get the overall bitrate, substract the video bitrate if any */
-                    pClipProperties->uiAudioBitrate -= pClipCtxt->pVideoStream->
-                        m_basicProperties.m_averageBitRate;
-                }
-            }
-        }
-
-        /* New aac properties */
-        if( M4DA_StreamTypeAudioAac
-            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
-        {
-            pClipProperties->uiNbChannels = pClipCtxt->AacProperties.aNumChan;
-            pClipProperties->uiSamplingFrequency =
-                pClipCtxt->AacProperties.aSampFreq;
-
-            if( pClipCtxt->AacProperties.aSBRPresent )
-            {
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAACplus;
-                pClipProperties->uiExtendedSamplingFrequency =
-                    pClipCtxt->AacProperties.aExtensionSampFreq;
-            }
-
-            if( pClipCtxt->AacProperties.aPSPresent )
-            {
-                pClipProperties->AudioStreamType = M4VIDEOEDITING_keAACplus;
-            }
-        }
-    }
-
-    /* Get 'ftyp' atom */
-    err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
-        pClipCtxt->pReaderContext,
-        M4READER_kOptionID_3gpFtypBox, &pClipProperties->ftyp);
-
-    /**
-    * We write the VSS 3GPP version in the clip analysis to be sure the integrator doesn't
-    * mix older analysis results with newer libraries */
-    pClipProperties->Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
-    pClipProperties->Version[1] = M4VIDEOEDITING_VERSION_MINOR;
-    pClipProperties->Version[2] = M4VIDEOEDITING_VERSION_REVISION;
-
-    pClipProperties->FileType = pClipCtxt->pSettings->FileType;
-
-    if( pClipProperties->uiClipVideoDuration
-        > pClipProperties->uiClipAudioDuration )
-        pClipProperties->uiClipDuration = pClipProperties->uiClipVideoDuration;
-    else
-        pClipProperties->uiClipDuration = pClipProperties->uiClipAudioDuration;
-
-    /* Reset compatibility chart */
-    pClipProperties->bVideoIsEditable = M4OSA_FALSE;
-    pClipProperties->bAudioIsEditable = M4OSA_FALSE;
-    pClipProperties->bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
-    pClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
-
-    /* Analysis successfully completed */
-    pClipProperties->bAnalysed = M4OSA_TRUE;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intBuildAnalysis(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
- * @brief    Check if the clip is compatible with VSS editing
- * @note
- * @param   pClipCtxt            (IN) internal clip context
- * @param    pClipProperties     (OUT) Pointer to a valid ClipProperties structure.
- * @return    M4NO_ERROR:            No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
-    M4VIDEOEDITING_ClipProperties *pClipProperties )
-{
-    M4OSA_UInt32 uiNbOfValidStreams = 0;
-    M4OSA_ERR video_err = M4NO_ERROR;
-    M4OSA_ERR audio_err = M4NO_ERROR;
-    /********* file type *********/
-
-    if( M4VIDEOEDITING_kFileType_AMR == pClipProperties->FileType )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing:\
-            returning M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
-        return M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED;
-    }
-
-    if( M4VIDEOEDITING_kFileType_MP3 == pClipProperties->FileType )
-    {
-        M4OSA_TRACE3_0(
-            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
-        return M4NO_ERROR;
-    }
-
-    /********* Video *********/
-
-    if( M4VIDEOEDITING_kNoneVideo
-        != pClipProperties->VideoStreamType ) /**< if there is a video stream */
-    {
-        /* Check video format is MPEG-4, H263 or H264 */
-        switch( pClipProperties->VideoStreamType )
-        {
-            case M4VIDEOEDITING_kH263:
-            case M4VIDEOEDITING_kMPEG4:
-            case M4VIDEOEDITING_kH264:
-                uiNbOfValidStreams++;
-                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
-                break;
-
-            default: /*< KO, we return error */
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported video format");
-                video_err = M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
-                break;
-        }
-    }
-    else
-    {
-        /**
-        * Audio only stream are currently not supported by the VSS editing feature
-        (unless in the MP3 case) */
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): No video stream in clip");
-        video_err = M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE;
-    }
-
-    /********* Audio *********/
-    if( M4VIDEOEDITING_kNoneAudio != pClipProperties->
-        AudioStreamType ) /**< if there is an audio stream */
-    {
-        /**
-        * Check audio format is AMR-NB, EVRC or AAC */
-        switch( pClipProperties->AudioStreamType )
-        {
-            case M4VIDEOEDITING_kAMR_NB:
-                pClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                uiNbOfValidStreams++;
-                break;
-
-            case M4VIDEOEDITING_kAAC:
-            case M4VIDEOEDITING_kAACplus:
-            case M4VIDEOEDITING_keAACplus:
-                switch( pClipProperties->uiSamplingFrequency )
-                {
-                case 8000:
-                case 16000:
-                case 22050:
-                case 24000:
-                case 32000:
-                case 44100:
-                case 48000:
-                    pClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                    break;
-
-                default:
-                    break;
-                }
-                uiNbOfValidStreams++;
-                break;
-
-            case M4VIDEOEDITING_kEVRC:
-                /*< OK, we proceed, no return */
-                uiNbOfValidStreams++;
-                break;
-
-            default: /*< KO, we return error */
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported audio format");
-                audio_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
-                break;
-        }
-    }
-    else
-    {
-        /* Silence is always editable */
-        pClipProperties->bAudioIsEditable = M4OSA_TRUE;
-    }
-
-    /**
-    * Check there is at least one valid stream in the file... */
-    if( video_err != M4NO_ERROR )
-        return video_err;
-
-    if( audio_err != M4NO_ERROR )
-        return audio_err;
-
-    if( 0 == uiNbOfValidStreams )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): File contains no supported stream,\
-            returning M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
-        return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
- * @brief    This function allows checking if two clips are compatible with each other for
- *        VSS 3GPP audio mixing feature.
- * @note
- * @param    pC                            (IN) Context of the audio mixer
- * @param    pInputClipProperties        (IN) Clip analysis of the first clip
- * @param    pAddedClipProperties        (IN) Clip analysis of the second clip
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
- * @return  M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
- * @return  M4NO_ERROR
- ******************************************************************************
- */
-M4OSA_ERR
-M4VSS3GPP_intAudioMixingCompatibility( M4VSS3GPP_InternalAudioMixingContext
-                                      *pC, M4VIDEOEDITING_ClipProperties *pInputClipProperties,
-                                      M4VIDEOEDITING_ClipProperties *pAddedClipProperties )
-{
-    M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
-    M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
-
-    /**
-    * Reset settings */
-    pInputClipProperties->bAudioIsEditable = M4OSA_FALSE;
-    pAddedClipProperties->bAudioIsEditable = M4OSA_FALSE;
-    pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
-    pAddedClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
-
-    /**
-    * Check that analysis has been generated by this version of the VSS3GPP library */
-    if( ( pInputClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
-        || (pInputClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
-        || (pInputClipProperties->Version[2]
-    != M4VIDEOEDITING_VERSION_REVISION) )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
-            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
-        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
-    }
-
-    if( ( pAddedClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
-        || (pAddedClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
-        || (pAddedClipProperties->Version[2]
-    != M4VIDEOEDITING_VERSION_REVISION) )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
-            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
-        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
-    }
-
-    /********* input file type *********/
-
-    if( M4VIDEOEDITING_kFileType_3GPP != pInputClipProperties->FileType )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAudioMixingCompatibility:\
-            returning M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
-        return M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP;
-    }
-
-    /********* input audio *********/
-
-    if( M4VIDEOEDITING_kNoneAudio != pInputClipProperties->
-        AudioStreamType ) /**< if there is an audio stream */
-    {
-        /**
-        * Check audio format is AMR-NB or AAC */
-        switch( pInputClipProperties->AudioStreamType )
-        {
-            case M4VIDEOEDITING_kAMR_NB:
-                pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                break;
-
-            case M4VIDEOEDITING_kAAC:
-            case M4VIDEOEDITING_kAACplus:
-            case M4VIDEOEDITING_keAACplus:
-                switch( pInputClipProperties->uiSamplingFrequency )
-                {
-                case 8000:
-                case 16000:
-                case 22050:
-                case 24000:
-                case 32000:
-                case 44100:
-                case 48000:
-                    pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                    break;
-
-                default:
-                    break;
-            }
-            bClip1IsAAC = M4OSA_TRUE;
-            break;
-          default:
-            break;
-        }
-    }
-    else
-    {
-        /* Silence is always editable */
-        pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
-    }
-
-    /********* added audio *********/
-
-    if( M4VIDEOEDITING_kNoneAudio != pAddedClipProperties->
-        AudioStreamType ) /**< if there is an audio stream */
-    {
-        /**
-        * Check audio format is AMR-NB or AAC */
-        switch( pAddedClipProperties->AudioStreamType )
-        {
-            case M4VIDEOEDITING_kAMR_NB:
-                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
-                    M4OSA_TRUE; /* I use this field to know if silence supported */
-                break;
-
-            case M4VIDEOEDITING_kAAC:
-            case M4VIDEOEDITING_kAACplus:
-            case M4VIDEOEDITING_keAACplus:
-                switch( pAddedClipProperties->uiSamplingFrequency )
-                {
-                case 8000:
-                case 16000:
-                case 22050:
-                case 24000:
-                case 32000:
-                case 44100:
-                case 48000:
-                    pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                    break;
-
-                default:
-                    break;
-                }
-                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
-                    M4OSA_TRUE; /* I use this field to know if silence supported */
-                bClip2IsAAC = M4OSA_TRUE;
-                break;
-
-            case M4VIDEOEDITING_kEVRC:
-                break;
-
-            case M4VIDEOEDITING_kPCM:
-                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
-                    M4OSA_TRUE; /* I use this field to know if silence supported */
-
-                if( pAddedClipProperties->uiSamplingFrequency == 16000 )
-                {
-                    bClip2IsAAC = M4OSA_TRUE;
-                }
-                break;
-
-            case M4VIDEOEDITING_kMP3: /*RC*/
-                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
-                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
-                    M4OSA_TRUE; /* I use this field to know if silence supported */
-                break;
-
-            default:
-                /* The writer cannot write this  into a 3gpp */
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intAudioMixingCompatibility:\
-                    returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
-                return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
-        }
-    }
-    else
-    {
-        /* Silence is always editable */
-        pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
-        pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
-            M4OSA_TRUE; /* I use this field to know if silence supported */
-    }
-
-    if( pC->bRemoveOriginal == M4OSA_FALSE )
-    {
-        if( pInputClipProperties->uiSamplingFrequency
-            != pAddedClipProperties->uiSamplingFrequency )
-        {
-            /* We need to call SSRC in order to align ASF and/or nb of channels */
-            /* Moreover, audio encoder may be needed in case of audio replacing... */
-            pC->b_SSRCneeded = M4OSA_TRUE;
-        }
-
-        if( pInputClipProperties->uiNbChannels
-            < pAddedClipProperties->uiNbChannels )
-        {
-            /* Stereo to Mono */
-            pC->ChannelConversion = 1;
-        }
-        else if( pInputClipProperties->uiNbChannels
-            > pAddedClipProperties->uiNbChannels )
-        {
-            /* Mono to Stereo */
-            pC->ChannelConversion = 2;
-        }
-    }
-
-    pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intAudioMixingCompatibility(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
deleted file mode 100755
index 1ced937..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
+++ /dev/null
@@ -1,1037 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *************************************************************************
- * @file   M4VSS3GPP_Codecs.c
- * @brief  VSS implementation
- * @note   This file contains all functions related to audio/video
- *            codec manipulations.
- *************************************************************************
- */
-
-#include "NXPSW_CompilerSwitches.h"
-
-#include "M4OSA_Debug.h"             /**< Include for OSAL debug services */
-#include "M4VSS3GPP_ErrorCodes.h"
-#include "M4VSS3GPP_InternalTypes.h" /**< Internal types of the VSS */
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_clearInterfaceTables()
- * @brief    Clear encoders, decoders, reader and writers interfaces tables
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    The context is null
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_clearInterfaceTables( M4VSS3GPP_MediaAndCodecCtxt *pC )
-{
-    M4OSA_UInt8 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-
-    /* Initialisation that will allow to check if registering twice */
-    pC->pWriterGlobalFcts = M4OSA_NULL;
-    pC->pWriterDataFcts = M4OSA_NULL;
-    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
-    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
-    pC->pCurrentAudioEncoderUserData = M4OSA_NULL;
-    pC->pCurrentAudioDecoderUserData = M4OSA_NULL;
-
-    pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
-    pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
-
-    for ( i = 0; i < M4WRITER_kType_NB; i++ )
-    {
-        pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
-        pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
-    }
-
-    for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
-    {
-        pC->pVideoEncoderInterface[i] = M4OSA_NULL;
-        pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
-        pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
-    }
-
-    for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
-    {
-        pC->pAudioEncoderInterface[i] = M4OSA_NULL;
-        pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
-        pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
-    }
-
-    /* Initialisation that will allow to check if registering twice */
-    pC->m_pReader = M4OSA_NULL;
-    pC->m_pReaderDataIt = M4OSA_NULL;
-    pC->m_uiNbRegisteredReaders = 0;
-
-    for ( i = 0; i < M4READER_kMediaType_NB; i++ )
-    {
-        pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
-        pC->m_pReaderDataItTable[i] = M4OSA_NULL;
-    }
-
-    pC->m_pVideoDecoder = M4OSA_NULL;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-    pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-    pC->m_uiNbRegisteredVideoDec = 0;
-
-    for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
-    {
-        pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-        pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
-
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-    }
-
-    pC->m_pAudioDecoder = M4OSA_NULL;
-
-    for ( i = 0; i < M4AD_kType_NB; i++ )
-    {
-        pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
-        pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
-        pC->pAudioDecoderUserDataTable[i] = M4OSA_NULL;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerWriter()
- * @brief    This function will register a specific file format writer.
- * @note    According to the Mediatype, this function will store in the internal
- *        context the writer context.
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext,pWtrGlobalInterface or pWtrDataInterface is M4OSA_NULL
- *                          (debug only), or invalid MediaType
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_registerWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                   M4WRITER_OutputFileType MediaType,
-                                   M4WRITER_GlobalInterface *pWtrGlobalInterface,
-                                   M4WRITER_DataInterface *pWtrDataInterface )
-{
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
-        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerWriter");
-    M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
-        "pWtrGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
-    M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL), M4ERR_PARAMETER,
-        "pWtrDataInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
-
-    M4OSA_TRACE3_3(
-        "VSS: M4VSS3GPP_registerWriter called with pContext=0x%x, pWtrGlobalInterface=0x%x,\
-        pWtrDataInterface=0x%x",
-        pC, pWtrGlobalInterface, pWtrDataInterface);
-
-    if( ( MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB) )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
-        return M4ERR_PARAMETER;
-    }
-
-    if( pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL )
-    {
-        /* a writer corresponding to this media type has already been registered !*/
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "This media type has already been registered");
-        return M4ERR_PARAMETER;
-    }
-
-    /*
-    * Save writer interface in context */
-    pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
-    pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerVideoEncoder()
- * @brief    This function will register a specific video encoder.
- * @note    According to the Mediatype, this function will store in the internal
- *        context the encoder context.
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
- *                          or invalid MediaType
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_registerVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                         M4ENCODER_Format MediaType,
-                                         M4ENCODER_GlobalInterface *pEncGlobalInterface )
-{
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
-        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
-    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
-        "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
-
-    M4OSA_TRACE3_3(
-        "VSS: M4VSS3GPP_registerEncoder called with pContext=0x%x, pEncGlobalInterface=0x%x,\
-        MediaType=0x%x",
-        pC, pEncGlobalInterface, MediaType);
-
-    if( MediaType >= M4ENCODER_kVideo_NB )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "Invalid video encoder type");
-        return M4ERR_PARAMETER;
-    }
-
-    if( pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL )
-    {
-        /* can be legitimate, in cases where we have one version that can use external encoders
-        but which still has the built-in one to be able to work without an external encoder; in
-        this case the new encoder simply replaces the old one (i.e. we unregister it first). */
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-        if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
-        {
-
-#endif
-
-            free(pC->pVideoEncoderInterface[MediaType]);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-        }
-
-#endif
-
-        pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
-    }
-
-    /*
-    * Save encoder interface in context */
-    pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
-    /* The actual userData and external API will be set by the registration function in the case
-    of an external encoder (add it as a parameter to this function in the long run?) */
-    pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
-    pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerAudioEncoder()
- * @brief    This function will register a specific audio encoder.
- * @note    According to the Mediatype, this function will store in the internal
- *        context the encoder context.
- * @param    pContext:                (IN) Execution context.
- * @param    mediaType:                (IN) The media type.
- * @param    pEncGlobalInterface:    (OUT) the encoder interface functions.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_registerAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                         M4ENCODER_AudioFormat MediaType,
-                                         M4ENCODER_AudioGlobalInterface *pEncGlobalInterface )
-{
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
-        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
-    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
-        "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
-
-    M4OSA_TRACE3_3(
-        "VSS: M4VSS3GPP_registerAudioEncoder called pContext=0x%x, pEncGlobalInterface=0x%x,\
-        MediaType=0x%x",
-        pC, pEncGlobalInterface, MediaType);
-
-    if( MediaType >= M4ENCODER_kAudio_NB )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "Invalid audio encoder type");
-        return M4ERR_PARAMETER;
-    }
-
-    if( pC->pAudioEncoderInterface[MediaType] != M4OSA_NULL )
-    {
-        free(pC->pAudioEncoderInterface[MediaType]);
-        pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
-    }
-    /*
-    * Save encoder interface in context */
-    pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
-    pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
-    pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_registerAudioEncoder: pC->pAudioEncoderInterface[0x%x] = 0x%x",
-        MediaType, pC->pAudioEncoderInterface[MediaType]);
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerReader()
- * @brief    Register reader.
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_registerReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                   M4READER_MediaType mediaType,
-                                   M4READER_GlobalInterface *pRdrGlobalInterface,
-                                   M4READER_DataInterface *pRdrDataInterface )
-{
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface), M4ERR_PARAMETER,
-        "M4VSS3GPP_registerReader: invalid pointer on global interface");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface), M4ERR_PARAMETER,
-        "M4VSS3GPP_registerReader: invalid pointer on data interface");
-
-    if( mediaType == M4READER_kMediaTypeUnknown
-        || mediaType >= M4READER_kMediaType_NB )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
-        return M4ERR_PARAMETER;
-    }
-
-    if( pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL )
-    {
-        /* a reader corresponding to this media type has already been registered !*/
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "This media type has already been registered");
-        return M4ERR_PARAMETER;
-    }
-
-    pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
-    pC->m_pReaderDataItTable[mediaType] = pRdrDataInterface;
-
-    pC->m_uiNbRegisteredReaders++;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerVideoDecoder()
- * @brief    Register video decoder
- * @param    pContext                (IN/OUT) VSS context.
- * @param    decoderType            (IN) Decoder type
- * @param    pDecoderInterface    (IN) Decoder interface.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only),
- *                                or the decoder type is invalid
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_registerVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                         M4DECODER_VideoType decoderType,
-                                         M4DECODER_VideoInterface *pDecoderInterface )
-{
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
-        "M4VSS3GPP_registerVideoDecoder: invalid pointer on decoder interface");
-
-    if( decoderType >= M4DECODER_kVideoType_NB )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "Invalid video decoder type");
-        return M4ERR_PARAMETER;
-    }
-
-    if( pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL )
-    {
-#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
-        /* a decoder corresponding to this media type has already been registered !*/
-
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "Decoder has already been registered");
-        return M4ERR_PARAMETER;
-
-#else /* external decoders are possible */
-        /* can be legitimate, in cases where we have one version that can use external decoders
-        but which still has the built-in one to be able to work without an external decoder; in
-        this case the new decoder simply replaces the old one (i.e. we unregister it first). */
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-        if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
-        {
-
-#endif
-
-            free(pC->m_pVideoDecoderItTable[decoderType]);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-        }
-
-#endif
-
-        pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
-        /* oh, and don't forget the user data, too. */
-        if( pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL )
-        {
-            free(pC->m_pVideoDecoderUserDataTable[decoderType]);
-            pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
-        }
-#endif /* are external decoders possible? */
-
-    }
-
-    pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-    pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
-    /* The actual userData will be set by the registration function in the case
-    of an external decoder (add it as a parameter to this function in the long run?) */
-
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-    pC->m_uiNbRegisteredVideoDec++;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_registerAudioDecoder()
- * @brief    Register audio decoder
- * @note    This function is used internaly by the VSS to register NXP audio decoders,
- * @param    context                (IN/OUT) VSS context.
- * @param    decoderType            (IN) Audio decoder type
- * @param    pDecoderInterface    (IN) Audio decoder interface.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:   A parameter is null, or the decoder type is invalid(in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_registerAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                         M4AD_Type decoderType, M4AD_Interface *pDecoderInterface)
-{
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
-        "M4VSS3GPP_registerAudioDecoder: invalid pointer on decoder interface");
-
-    if( decoderType >= M4AD_kType_NB )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
-            "Invalid audio decoder type");
-        return M4ERR_PARAMETER;
-    }
-    if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
-    {
-        free(pC->m_pAudioDecoderItTable[decoderType]);
-        pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
-
-        if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
-        {
-            free(pC->m_pAudioDecoderItTable[decoderType]);
-            pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
-        }
-    }
-
-
-
-    pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
-    pC->m_pAudioDecoderFlagTable[decoderType] =
-        M4OSA_FALSE; /* internal decoder */
-    pC->pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters()
- * @brief    Unregister writer
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_unRegisterAllWriters( M4VSS3GPP_MediaAndCodecCtxt *pC )
-{
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-
-    for ( i = 0; i < M4WRITER_kType_NB; i++ )
-    {
-        if( pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL )
-        {
-            free(pC->WriterInterface[i].pGlobalFcts);
-            pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
-        }
-
-        if( pC->WriterInterface[i].pDataFcts != M4OSA_NULL )
-        {
-            free(pC->WriterInterface[i].pDataFcts);
-            pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
-        }
-    }
-
-    pC->pWriterGlobalFcts = M4OSA_NULL;
-    pC->pWriterDataFcts = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders()
- * @brief    Unregister the encoders
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
-{
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllEncoders: pC=0x%x", pC);
-
-    for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
-    {
-        if( pC->pVideoEncoderInterface[i] != M4OSA_NULL )
-        {
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
-            {
-
-#endif
-
-                free(pC->pVideoEncoderInterface[i]);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            }
-
-#endif
-
-            pC->pVideoEncoderInterface[i] = M4OSA_NULL;
-        }
-    }
-
-    for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
-    {
-        if( pC->pAudioEncoderInterface[i] != M4OSA_NULL )
-        {
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
-            {
-
-#endif
-                /*Don't free external audio encoders interfaces*/
-
-                if( M4OSA_FALSE == pC->pAudioEncoderFlag[i] )
-                {
-                    free(pC->pAudioEncoderInterface[i]);
-                }
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            }
-
-#endif
-
-            pC->pAudioEncoderInterface[i] = M4OSA_NULL;
-        }
-    }
-
-    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
-    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders()
- * @brief    Unregister reader
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_unRegisterAllReaders( M4VSS3GPP_MediaAndCodecCtxt *pC )
-{
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-
-    for ( i = 0; i < M4READER_kMediaType_NB; i++ )
-    {
-        if( pC->m_pReaderGlobalItTable[i] != M4OSA_NULL )
-        {
-            free(pC->m_pReaderGlobalItTable[i]);
-            pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
-        }
-
-        if( pC->m_pReaderDataItTable[i] != M4OSA_NULL )
-        {
-            free(pC->m_pReaderDataItTable[i]);
-            pC->m_pReaderDataItTable[i] = M4OSA_NULL;
-        }
-    }
-
-    pC->m_uiNbRegisteredReaders = 0;
-    pC->m_pReader = M4OSA_NULL;
-    pC->m_pReaderDataIt = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders()
- * @brief    Unregister the decoders
- * @param    pContext            (IN/OUT) VSS context.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
-{
-    M4OSA_Int32 i;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllDecoders: pC=0x%x", pC);
-
-    for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
-    {
-        if( pC->m_pVideoDecoderItTable[i] != M4OSA_NULL )
-        {
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
-            {
-
-#endif
-
-                free(pC->m_pVideoDecoderItTable[i]);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            }
-
-#endif
-
-            pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
-
-        }
-    }
-
-    for ( i = 0; i < M4AD_kType_NB; i++ )
-    {
-        if( pC->m_pAudioDecoderItTable[i] != M4OSA_NULL )
-        {
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
-            {
-
-#endif
-                /*Don't free external audio decoders interfaces*/
-
-                if( M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i] )
-                {
-                    free(pC->m_pAudioDecoderItTable[i]);
-                }
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-            }
-
-#endif
-
-            pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
-        }
-    }
-
-    pC->m_uiNbRegisteredVideoDec = 0;
-    pC->m_pVideoDecoder = M4OSA_NULL;
-
-    pC->m_pAudioDecoder = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentWriter()
- * @brief    Set current writer
- * @param    pContext            (IN/OUT) VSS context.
- * @param    mediaType            (IN) Media type.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_setCurrentWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                     M4VIDEOEDITING_FileType mediaType )
-{
-    M4WRITER_OutputFileType writerType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-
-    switch( mediaType )
-    {
-        case M4VIDEOEDITING_kFileType_3GPP:
-            writerType = M4WRITER_k3GPP;
-            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
-                "Writer type not supported");
-            return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
-    }
-
-    pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
-    pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
-
-    if( pC->pWriterGlobalFcts == M4OSA_NULL
-        || pC->pWriterDataFcts == M4OSA_NULL )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
-            "Writer type not supported");
-        M4OSA_TRACE1_0("Writer type not supported");
-        return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
-    }
-
-    pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder()
- * @brief    Set a video encoder
- * @param    pContext            (IN/OUT) VSS context.
- * @param    MediaType           (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                           M4SYS_StreamType mediaType )
-{
-    M4ENCODER_Format encoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoEncoder: pC=0x%x, mediaType=0x%x",
-        pC, mediaType);
-
-    switch( mediaType )
-    {
-        case M4SYS_kH263:
-            encoderType = M4ENCODER_kH263;
-            break;
-
-        case M4SYS_kMPEG_4:
-            encoderType = M4ENCODER_kMPEG4;
-            break;
-
-        case M4SYS_kH264:
-            encoderType = M4ENCODER_kH264;
-            break;
-
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE,
-                M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
-                "Video encoder type not supported");
-            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
-    }
-
-    pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
-    pC->pCurrentVideoEncoderExternalAPI =
-        pC->pVideoEncoderExternalAPITable[encoderType];
-    pC->pCurrentVideoEncoderUserData =
-        pC->pVideoEncoderUserDataTable[encoderType];
-
-    if( pC->pVideoEncoderGlobalFcts == M4OSA_NULL )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE,
-            M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
-            "Video encoder type not supported");
-        M4OSA_TRACE1_0("Video encoder type not supported");
-        return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder()
- * @brief    Set an audio encoder
- * @param    context            (IN/OUT) VSS context.
- * @param    MediaType        (IN) Encoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                           M4SYS_StreamType mediaType )
-{
-    M4ENCODER_AudioFormat encoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioEncoder: pC=0x%x, mediaType=0x%x",
-        pC, mediaType);
-
-    switch( mediaType )
-    {
-        case M4SYS_kAMR:
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_setCurrentAudioEncoder: encoder type AMR");
-            encoderType = M4ENCODER_kAMRNB;
-            break;
-
-        case M4SYS_kAAC:
-            M4OSA_TRACE3_0(
-                "M4VSS3GPP_setCurrentAudioEncoder: encoder type AAC");
-            encoderType = M4ENCODER_kAAC;
-            break;
-
-       default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE,
-                M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
-                "Audio encoder type not supported");
-            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
-    }
-
-    pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
-    pC->pCurrentAudioEncoderUserData =
-        pC->pAudioEncoderUserDataTable[encoderType];
-
-    M4OSA_TRACE3_3(
-        "M4VSS3GPP_setCurrentAudioEncoder: pC->pAudioEncoderInterface[0x%x]=0x%x,\
-        pC->pAudioEncoderGlobalFcts = 0x%x",
-        encoderType, pC->pAudioEncoderInterface[encoderType],
-        pC->pAudioEncoderGlobalFcts);
-
-    if( pC->pAudioEncoderGlobalFcts == M4OSA_NULL )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE,
-            M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
-            "Audio encoder type not supported");
-        M4OSA_TRACE1_0("Audio encoder type not supported");
-        return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentReader()
- * @brief    Set current reader
- * @param    pContext            (IN/OUT) VSS context.
- * @param    mediaType            (IN) Media type.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_setCurrentReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                     M4VIDEOEDITING_FileType mediaType )
-{
-    M4READER_MediaType readerType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-
-    switch( mediaType )
-    {
-        case M4VIDEOEDITING_kFileType_3GPP:
-        case M4VIDEOEDITING_kFileType_MP4:
-        case M4VIDEOEDITING_kFileType_M4V:
-            readerType = M4READER_kMediaType3GPP;
-            break;
-
-        case M4VIDEOEDITING_kFileType_AMR:
-            readerType = M4READER_kMediaTypeAMR;
-            break;
-
-        case M4VIDEOEDITING_kFileType_MP3:
-            readerType = M4READER_kMediaTypeMP3;
-            break;
-
-        case M4VIDEOEDITING_kFileType_PCM:
-            readerType = M4READER_kMediaTypePCM;
-            break;
-
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
-                "Reader type not supported");
-            return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
-    }
-
-    pC->m_pReader = pC->m_pReaderGlobalItTable[readerType];
-    pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
-
-    if( pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
-            "Reader type not supported");
-        M4OSA_TRACE1_0("Reader type not supported");
-        return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
-    }
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder()
- * @brief    Set a video decoder
- * @param    pContext            (IN/OUT) VSS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
- * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                           M4_StreamType mediaType )
-{
-    M4DECODER_VideoType decoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoDecoder: pC=0x%x, mediaType=0x%x",
-        pC, mediaType);
-
-    switch( mediaType )
-    {
-        case M4DA_StreamTypeVideoMpeg4:
-        case M4DA_StreamTypeVideoH263:
-            decoderType = M4DECODER_kVideoTypeMPEG4;
-            break;
-
-        case M4DA_StreamTypeVideoMpeg4Avc:
-            decoderType = M4DECODER_kVideoTypeAVC;
-            break;
-        case M4DA_StreamTypeVideoARGB8888:
-            decoderType = M4DECODER_kVideoTypeYUV420P;
-            break;
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE,
-                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
-                "Video decoder type not supported");
-            return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
-    }
-
-    pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-
-    pC->m_pCurrentVideoDecoderUserData =
-        pC->m_pVideoDecoderUserDataTable[decoderType];
-
-#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
-
-    if( pC->m_pVideoDecoder == M4OSA_NULL )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE,
-            M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
-            "Video decoder type not supported");
-        M4OSA_TRACE1_0("Video decoder type not supported");
-        return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ************************************************************************
- * M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder()
- * @brief    Set an audio decoder
- * @param    context            (IN/OUT) VSS context.
- * @param    decoderType        (IN) Decoder type
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
- ************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
-                                           M4_StreamType mediaType )
-{
-    M4AD_Type decoderType;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioDecoder: pC=0x%x, mediaType=0x%x",
-        pC, mediaType);
-
-    switch( mediaType )
-    {
-        case M4DA_StreamTypeAudioAmrNarrowBand:
-            decoderType = M4AD_kTypeAMRNB;
-            break;
-
-        case M4DA_StreamTypeAudioAac:
-        case M4DA_StreamTypeAudioAacADTS:
-        case M4DA_StreamTypeAudioAacADIF:
-            decoderType = M4AD_kTypeAAC;
-            break;
-
-        case M4DA_StreamTypeAudioMp3:
-            decoderType = M4AD_kTypeMP3;
-            break;
-
-        case M4DA_StreamTypeAudioPcm:
-            decoderType = M4AD_kTypePCM;
-            break;
-
-        default:
-            M4OSA_DEBUG_IF1(M4OSA_TRUE,
-                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
-                "Audio decoder type not supported");
-            return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
-    }
-
-    pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
-    pC->pCurrentAudioDecoderUserData =
-        pC->pAudioDecoderUserDataTable[decoderType];
-
-    if( pC->m_pAudioDecoder == M4OSA_NULL )
-    {
-        M4OSA_DEBUG_IF1(M4OSA_TRUE,
-            M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
-            "Audio decoder type not supported");
-        M4OSA_TRACE1_0("Audio decoder type not supported");
-        return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
-    }
-
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Edit.c b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
deleted file mode 100755
index df8b7d5..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
+++ /dev/null
@@ -1,3475 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_Edit.c
- * @brief    Video Studio Service 3GPP edit API implementation.
- * @note
- ******************************************************************************
- */
-
-/****************/
-/*** Includes ***/
-/****************/
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- * Our headers */
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_InternalTypes.h"
-#include "M4VSS3GPP_InternalFunctions.h"
-#include "M4VSS3GPP_InternalConfig.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-
-
-/**
- * OSAL headers */
-#include "M4OSA_Memory.h"   /**< OSAL memory management */
-#include "M4OSA_Debug.h"    /**< OSAL debug management */
-#include "M4OSA_CharStar.h" /**< OSAL string management */
-
-#ifdef WIN32
-#include "string.h"         /**< for strcpy (Don't want to get dependencies
-                                 with M4OSA_String...) */
-
-#endif                      /* WIN32 */
-#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
-#include "M4VD_EXTERNAL_Interface.h"
-#endif
-
-/************************************************************************/
-/* Static local functions                                               */
-/************************************************************************/
-static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
-    M4VSS3GPP_ClipSettings *pClip );
-static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
-    M4VSS3GPP_TransitionSettings *pTransition );
-static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
-    M4VSS3GPP_InternalEditContext *pC );
-static M4OSA_ERR
-M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
-                                 M4OSA_Void *pOutputFile );
-static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
-    M4VSS3GPP_InternalEditContext *pC );
-static M4OSA_ERR
-M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
-                                           M4OSA_UInt8 uiMasterClip );
-static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
-    M4VSS3GPP_InternalEditContext *pC );
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_GetVersion()
- * @brief    Get the VSS 3GPP version.
- * @note    Can be called anytime. Do not need any context.
- * @param    pVersionInfo        (OUT) Pointer to a version info structure
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_GetVersion( M4_VersionInfo *pVersionInfo )
-{
-    M4OSA_TRACE3_1("M4VSS3GPP_GetVersion called with pVersionInfo=0x%x",
-        pVersionInfo);
-
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
-        "M4VSS3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
-
-    pVersionInfo->m_major = M4VSS_VERSION_MAJOR;
-    pVersionInfo->m_minor = M4VSS_VERSION_MINOR;
-    pVersionInfo->m_revision = M4VSS_VERSION_REVISION;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editInit()
- * @brief    Initializes the VSS 3GPP edit operation (allocates an execution context).
- * @note
- * @param    pContext            (OUT) Pointer on the VSS 3GPP edit context to allocate
- * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
- * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        There is no more available memory
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editInit( M4VSS3GPP_EditContext *pContext,
-                             M4OSA_FileReadPointer *pFileReadPtrFct,
-                             M4OSA_FileWriterPointer *pFileWritePtrFct )
-{
-    M4VSS3GPP_InternalEditContext *pC;
-    M4OSA_ERR err;
-    M4OSA_UInt32 i;
-
-    M4OSA_TRACE3_3(
-        "M4VSS3GPP_editInit called with pContext=0x%x, \
-        pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
-        pContext, pFileReadPtrFct, pFileWritePtrFct);
-
-    /**
-    * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_editInit: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
-        "M4VSS3GPP_editInit: pFileReadPtrFct is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
-        "M4VSS3GPP_editInit: pFileWritePtrFct is M4OSA_NULL");
-
-    /**
-    * Allocate the VSS context and return it to the user */
-    pC = (M4VSS3GPP_InternalEditContext
-        *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_InternalEditContext),
-        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_InternalContext");
-    *pContext = pC;
-        /* Inialization of context Variables */
-    memset((void *)pC, 0,sizeof(M4VSS3GPP_InternalEditContext));
-
-    if( M4OSA_NULL == pC )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_editInit(): unable to allocate M4VSS3GPP_InternalContext,\
-            returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-
-    /* Init the context. */
-    pC->uiClipNumber = 0;
-    pC->pClipList = M4OSA_NULL;
-    pC->pTransitionList = M4OSA_NULL;
-    pC->pEffectsList = M4OSA_NULL;
-    pC->pActiveEffectsList = M4OSA_NULL;
-    pC->pActiveEffectsList1 = M4OSA_NULL;
-    pC->bClip1ActiveFramingEffect = M4OSA_FALSE;
-    pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
-    pC->uiCurrentClip = 0;
-    pC->pC1 = M4OSA_NULL;
-    pC->pC2 = M4OSA_NULL;
-    pC->yuv1[0].pac_data = pC->yuv1[1].pac_data = pC->
-        yuv1[2].pac_data = M4OSA_NULL;
-    pC->yuv2[0].pac_data = pC->yuv2[1].pac_data = pC->
-        yuv2[2].pac_data = M4OSA_NULL;
-    pC->yuv3[0].pac_data = pC->yuv3[1].pac_data = pC->
-        yuv3[2].pac_data = M4OSA_NULL;
-    pC->yuv4[0].pac_data = pC->yuv4[1].pac_data = pC->
-        yuv4[2].pac_data = M4OSA_NULL;
-    pC->bClip1AtBeginCut = M4OSA_FALSE;
-    pC->iClip1ActiveEffect = 0;
-    pC->iClip2ActiveEffect = 0;
-    pC->bTransitionEffect = M4OSA_FALSE;
-    pC->bSupportSilence = M4OSA_FALSE;
-
-    /**
-    * Init PC->ewc members */
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    pC->ewc.dInputVidCts  = 0.0;
-    pC->ewc.dOutputVidCts = 0.0;
-    pC->ewc.dATo = 0.0;
-    pC->ewc.iOutputDuration = 0;
-    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
-    pC->ewc.uiVideoBitrate = 0;
-    pC->ewc.uiVideoWidth = 0;
-    pC->ewc.uiVideoHeight = 0;
-    pC->ewc.uiVideoTimeScale = 0;
-    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
-    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
-    pC->ewc.uiVideoOutputDsiSize = 0;
-    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
-    pC->ewc.uiNbChannels = 1;
-    pC->ewc.uiAudioBitrate = 0;
-    pC->ewc.uiSamplingFrequency = 0;
-    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
-    pC->ewc.uiAudioOutputDsiSize = 0;
-    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
-    pC->ewc.pAudioEncDSI.infoSize = 0;
-    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
-    pC->ewc.uiSilencePcmSize = 0;
-    pC->ewc.pSilenceFrameData = M4OSA_NULL;
-    pC->ewc.uiSilenceFrameSize = 0;
-    pC->ewc.iSilenceFrameDuration = 0;
-    pC->ewc.scale_audio = 0.0;
-    pC->ewc.pEncContext = M4OSA_NULL;
-    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
-    pC->ewc.iMpeg4GovOffset = 0;
-    pC->ewc.VppError = 0;
-    pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
-    pC->ewc.p3gpWriterContext = M4OSA_NULL;
-    pC->ewc.uiVideoMaxAuSize = 0;
-    pC->ewc.uiAudioMaxAuSize = 0;
-    /**
-    * Keep the OSAL file functions pointer set in our context */
-    pC->pOsaFileReadPtr = pFileReadPtrFct;
-    pC->pOsaFileWritPtr = pFileWritePtrFct;
-
-    /*
-    * Reset pointers for media and codecs interfaces */
-
-    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
-    M4ERR_CHECK_RETURN(err);
-
-    /*
-    *  Call the media and codecs subscription module */
-    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Update main state automaton */
-    pC->State = M4VSS3GPP_kEditState_CREATED;
-    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
-    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
-    /* The flag is set to false at the beginning of every clip */
-    pC->m_bClipExternalHasStarted = M4OSA_FALSE;
-
-    pC->bIsMMS = M4OSA_FALSE;
-
-    pC->iInOutTimeOffset = 0;
-    pC->bEncodeTillEoF = M4OSA_FALSE;
-    pC->nbActiveEffects = 0;
-    pC->nbActiveEffects1 = 0;
-    pC->bIssecondClip = M4OSA_FALSE;
-    pC->m_air_context = M4OSA_NULL;
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_editInit(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
- * @brief    Allows filling a clip settings structure with default values
- *
- * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
- *                   pClipSettings->pFile      will be allocated in this function.
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   pFile               (IN) Clip file name
- * @param   filePathSize        (IN) Clip path size (needed for UTF 16 conversion)
- * @param    nbEffects           (IN) Nb of effect settings to allocate
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR
-M4VSS3GPP_editCreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
-                                 M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
-                                 M4OSA_UInt8 nbEffects )
-{
-    M4OSA_UInt8 uiFx;
-
-    M4OSA_TRACE3_1(
-        "M4VSS3GPP_editCreateClipSettings called with pClipSettings=0x%p",
-        pClipSettings);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
-        "M4VSS3GPP_editCreateClipSettings: pClipSettings is NULL");
-
-    /**
-    * Set the clip settings to default */
-    pClipSettings->pFile = M4OSA_NULL;        /**< no file */
-    pClipSettings->FileType =
-        M4VIDEOEDITING_kFileType_Unsupported; /**< undefined */
-
-    if( M4OSA_NULL != pFile )
-    {
-        //pClipSettings->pFile = (M4OSA_Char*) M4OSA_32bitAlignedMalloc(strlen(pFile)+1, M4VSS3GPP,
-        // "pClipSettings->pFile");
-        /*FB: add clip path size because of utf 16 conversion*/
-        pClipSettings->pFile =
-            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(filePathSize + 1, M4VSS3GPP,
-            (M4OSA_Char *)"pClipSettings->pFile");
-
-        if( M4OSA_NULL == pClipSettings->pFile )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editCreateClipSettings : ERROR allocating filename");
-            return M4ERR_ALLOC;
-        }
-        //memcpy(pClipSettings->pFile, pFile, strlen(pFile)+1);
-        /*FB: add clip path size because of utf 16 conversion*/
-        memcpy((void *)pClipSettings->pFile, (void *)pFile, filePathSize + 1);
-    }
-
-    /*FB: add file path size to support UTF16 conversion*/
-    pClipSettings->filePathSize = filePathSize + 1;
-    /**/
-    pClipSettings->ClipProperties.bAnalysed = M4OSA_FALSE;
-    pClipSettings->ClipProperties.FileType = 0;
-    pClipSettings->ClipProperties.Version[0] = 0;
-    pClipSettings->ClipProperties.Version[1] = 0;
-    pClipSettings->ClipProperties.Version[2] = 0;
-    pClipSettings->ClipProperties.uiClipDuration = 0;
-
-    pClipSettings->uiBeginCutTime = 0; /**< no begin cut */
-    pClipSettings->uiEndCutTime = 0;   /**< no end cut */
-    pClipSettings->ClipProperties.bSetImageData = M4OSA_FALSE;
-
-    /**
-    * Reset video characteristics */
-    pClipSettings->ClipProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
-    pClipSettings->ClipProperties.uiClipVideoDuration = 0;
-    pClipSettings->ClipProperties.uiVideoBitrate = 0;
-    pClipSettings->ClipProperties.uiVideoMaxAuSize = 0;
-    pClipSettings->ClipProperties.uiVideoWidth = 0;
-    pClipSettings->ClipProperties.uiVideoHeight = 0;
-    pClipSettings->ClipProperties.uiVideoTimeScale = 0;
-    pClipSettings->ClipProperties.fAverageFrameRate = 0.0;
-    pClipSettings->ClipProperties.uiVideoProfile =
-        M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
-    pClipSettings->ClipProperties.uiVideoLevel =
-        M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
-    pClipSettings->ClipProperties.bMPEG4dataPartition = M4OSA_FALSE;
-    pClipSettings->ClipProperties.bMPEG4rvlc = M4OSA_FALSE;
-    pClipSettings->ClipProperties.bMPEG4resynchMarker = M4OSA_FALSE;
-
-    /**
-    * Reset audio characteristics */
-    pClipSettings->ClipProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
-    pClipSettings->ClipProperties.uiClipAudioDuration = 0;
-    pClipSettings->ClipProperties.uiAudioBitrate = 0;
-    pClipSettings->ClipProperties.uiAudioMaxAuSize = 0;
-    pClipSettings->ClipProperties.uiNbChannels = 0;
-    pClipSettings->ClipProperties.uiSamplingFrequency = 0;
-    pClipSettings->ClipProperties.uiExtendedSamplingFrequency = 0;
-    pClipSettings->ClipProperties.uiDecodedPcmSize = 0;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_editSetDefaultSettings(): returning M4NO_ERROR");
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
- * @brief    Duplicates a clip settings structure, performing allocations if required
- *
- * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR
-M4VSS3GPP_editDuplicateClipSettings( M4VSS3GPP_ClipSettings *pClipSettingsDest,
-                                    M4VSS3GPP_ClipSettings *pClipSettingsOrig,
-                                    M4OSA_Bool bCopyEffects )
-{
-    M4OSA_UInt8 uiFx;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_editDuplicateClipSettings called with dest=0x%p src=0x%p",
-        pClipSettingsDest, pClipSettingsOrig);
-
-    /* Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
-        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsDest is NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
-        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsOrig is NULL");
-
-    /* Copy plain structure */
-    memcpy((void *)pClipSettingsDest,
-        (void *)pClipSettingsOrig, sizeof(M4VSS3GPP_ClipSettings));
-
-    /* Duplicate filename */
-    if( M4OSA_NULL != pClipSettingsOrig->pFile )
-    {
-        //pClipSettingsDest->pFile =
-        // (M4OSA_Char*) M4OSA_32bitAlignedMalloc(strlen(pClipSettingsOrig->pFile)+1, M4VSS3GPP,
-        // "pClipSettingsDest->pFile");
-        /*FB: clip path size is needed for utf 16 conversion*/
-        /*FB 2008/10/16: bad allocation size which raises a crash*/
-        pClipSettingsDest->pFile =
-            (M4OSA_Char *)M4OSA_32bitAlignedMalloc(pClipSettingsOrig->filePathSize + 1,
-            M4VSS3GPP, (M4OSA_Char *)"pClipSettingsDest->pFile");
-
-        if( M4OSA_NULL == pClipSettingsDest->pFile )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editDuplicateClipSettings : ERROR allocating filename");
-            return M4ERR_ALLOC;
-        }
-        /*FB: clip path size is needed for utf 16 conversion*/
-        //memcpy(pClipSettingsDest->pFile, pClipSettingsOrig->pFile,
-        // strlen(pClipSettingsOrig->pFile)+1);
-        /*FB 2008/10/16: bad allocation size which raises a crash*/
-        memcpy((void *)pClipSettingsDest->pFile, (void *)pClipSettingsOrig->pFile,
-            pClipSettingsOrig->filePathSize/*+1*/);
-        ( (M4OSA_Char
-            *)pClipSettingsDest->pFile)[pClipSettingsOrig->filePathSize] = '\0';
-    }
-
-    /* Duplicate effects */
-    /* Return with no error */
-
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_editDuplicateClipSettings(): returning M4NO_ERROR");
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
- * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editFreeClipSettings(
-    M4VSS3GPP_ClipSettings *pClipSettings )
-{
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
-        "M4VSS3GPP_editFreeClipSettings: pClipSettings is NULL");
-
-    /* free filename */
-    if( M4OSA_NULL != pClipSettings->pFile )
-    {
-        free(pClipSettings->pFile);
-        pClipSettings->pFile = M4OSA_NULL;
-    }
-
-    /* free effects settings */
-    /*    if(M4OSA_NULL != pClipSettings->Effects)
-    {
-    free(pClipSettings->Effects);
-    pClipSettings->Effects = M4OSA_NULL;
-    pClipSettings->nbEffects = 0;
-    } RC */
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editOpen()
- * @brief     Set the VSS input and output files.
- * @note      It opens the input file, but the output file may not be created yet.
- * @param     pContext           (IN) VSS edit context
- * @param     pSettings           (IN) Edit settings
- * @return    M4NO_ERROR:       No error
- * @return    M4ERR_PARAMETER:  At least one parameter is M4OSA_NULL (debug only)
- * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
- * @return    M4ERR_ALLOC:      There is no more available memory
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editOpen( M4VSS3GPP_EditContext pContext,
-                             M4VSS3GPP_EditSettings *pSettings )
-{
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-
-    M4OSA_ERR err;
-    M4OSA_Int32 i;
-    M4VIDEOEDITING_FileType outputFileType =
-        M4VIDEOEDITING_kFileType_Unsupported; /**< 3GPP or MP3 (we don't do AMR output) */
-    M4OSA_UInt32 uiC1duration, uiC2duration;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_editOpen called with pContext=0x%x, pSettings=0x%x",
-        pContext, pSettings);
-
-    /**
-    *    Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_editOpen: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
-        "M4VSS3GPP_editOpen: pSettings is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings->pClipList), M4ERR_PARAMETER,
-        "M4VSS3GPP_editOpen: pSettings->pClipList is M4OSA_NULL");
-    M4OSA_DEBUG_IF2(( pSettings->uiClipNumber > 1)
-        && (M4OSA_NULL == pSettings->pTransitionList), M4ERR_PARAMETER,
-        "M4VSS3GPP_editOpen: pSettings->pTransitionList is M4OSA_NULL");
-
-    /**
-    * Check state automaton */
-    if( ( pC->State != M4VSS3GPP_kEditState_CREATED)
-        && (pC->State != M4VSS3GPP_kEditState_CLOSED) )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editOpen: State error (0x%x)! Returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /**
-    * Free any previously allocated internal settings list */
-    M4VSS3GPP_intFreeSettingsList(pC);
-
-    /**
-    * Copy the user settings in our context */
-    pC->uiClipNumber = pSettings->uiClipNumber;
-
-    /**
-    * Copy the clip list */
-    pC->pClipList =
-        (M4VSS3GPP_ClipSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipSettings)
-        * pC->uiClipNumber, M4VSS3GPP, (M4OSA_Char *)"pC->pClipList");
-
-    if( M4OSA_NULL == pC->pClipList )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_editOpen: unable to allocate pC->Settings.pClipList,\
-            returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    for ( i = 0; i < pSettings->uiClipNumber; i++ )
-    {
-        M4VSS3GPP_editDuplicateClipSettings(&(pC->pClipList[i]),
-            pSettings->pClipList[i], M4OSA_TRUE);
-    }
-
-    /**
-    * Copy effects list RC */
-
-    /*FB bug fix 19.03.2008 if the number of effects is 0 -> crash*/
-    if( pSettings->nbEffects > 0 )
-    {
-        pC->nbEffects = pSettings->nbEffects;
-        pC->pEffectsList = (M4VSS3GPP_EffectSettings
-            *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_EffectSettings) * pC->nbEffects,
-            M4VSS3GPP, (M4OSA_Char *)"pC->pEffectsList");
-
-        if( M4OSA_NULL == pC->pEffectsList )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editOpen: unable to allocate pC->pEffectsList, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-        for ( i = 0; i < pC->nbEffects; i++ )
-        {
-            memcpy((void *) &(pC->pEffectsList[i]),
-                (void *) &(pSettings->Effects[i]),
-                sizeof(M4VSS3GPP_EffectSettings));
-        }
-
-        /**
-        * Allocate active effects list RC */
-        pC->pActiveEffectsList =
-            (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
-            M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList");
-
-        if( M4OSA_NULL == pC->pActiveEffectsList )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList,\
-                returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        /**
-         * Allocate active effects list */
-        pC->pActiveEffectsList1 =
-            (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
-            M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList");
-        if (M4OSA_NULL == pC->pActiveEffectsList1)
-        {
-            M4OSA_TRACE1_0("M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList, \
-                           returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-    }
-    else
-    {
-        pC->nbEffects = 0;
-        pC->nbActiveEffects = 0;
-        pC->nbActiveEffects1 = 0;
-        pC->pEffectsList = M4OSA_NULL;
-        pC->pActiveEffectsList = M4OSA_NULL;
-        pC->pActiveEffectsList1 = M4OSA_NULL;
-        pC->bClip1ActiveFramingEffect = M4OSA_FALSE;
-        pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
-    }
-
-    /**
-    * Test the clip analysis data, if it is not provided, analyse the clips by ourselves. */
-    for ( i = 0; i < pC->uiClipNumber; i++ )
-    {
-        if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAnalysed )
-        {
-            /**< Analysis not provided by the integrator */
-            err = M4VSS3GPP_editAnalyseClip(pC->pClipList[i].pFile,
-                pC->pClipList[i].FileType, &pC->pClipList[i].ClipProperties,
-                pC->pOsaFileReadPtr);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editOpen: M4VSS3GPP_editAnalyseClip returns 0x%x!",
-                    err);
-                return err;
-            }
-        }
-    }
-
-    /**
-    * Check clip compatibility */
-    for ( i = 0; i < pC->uiClipNumber; i++ )
-    {
-        if (pC->pClipList[i].FileType !=M4VIDEOEDITING_kFileType_ARGB8888) {
-            /**
-            * Check all the clips are compatible with VSS 3GPP */
-            err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
-                &pC->pClipList[i].ClipProperties);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_2(
-                    "M4VSS3GPP_editOpen:\
-                    M4VSS3GPP_intCheckClipCompatibleWithVssEditing(%d) returns 0x%x!",
-                    i, err);
-                return err;
-            }
-        }
-
-        /**
-        * Check the master clip versus all the other ones.
-        (including master clip with itself, else variables for master clip
-        are not properly setted) */
-        if(pC->pClipList[i].FileType != M4VIDEOEDITING_kFileType_ARGB8888) {
-
-            err = M4VSS3GPP_editCheckClipCompatibility(
-                &pC->pClipList[pSettings->uiMasterClip].ClipProperties,
-                &pC->pClipList[i].ClipProperties);
-            /* in case of warning regarding audio incompatibility,
-                editing continues */
-            if( M4OSA_ERR_IS_ERROR(err) )
-            {
-                M4OSA_TRACE1_2(
-                    "M4VSS3GPP_editOpen: M4VSS3GPP_editCheckClipCompatibility \
-                        (%d) returns 0x%x!", i, err);
-                return err;
-            }
-        } else {
-            pC->pClipList[i].ClipProperties.bAudioIsCompatibleWithMasterClip =
-             M4OSA_FALSE;
-        }
-    }
-    /* Search audio tracks that cannot be edited :
-    *   - delete all audio effects for the clip
-    *   - if master clip is editable let the transition
-    (bad track will be replaced later with silence)
-    *   - if master clip is not editable switch to a dummy transition (only copy/paste) */
-    for ( i = 0; i < pC->uiClipNumber; i++ )
-    {
-        if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAudioIsEditable )
-        {
-            M4OSA_UInt8 uiFx;
-
-            for ( uiFx = 0; uiFx < pC->nbEffects; uiFx++ )
-            {
-                pC->pEffectsList[uiFx].AudioEffectType
-                    = M4VSS3GPP_kAudioEffectType_None;
-            }
-
-            if( ( i < (pC->uiClipNumber - 1))
-                && (M4OSA_NULL != pSettings->pTransitionList[i])
-                && (M4OSA_FALSE == pC->pClipList[pSettings->
-                uiMasterClip].ClipProperties.bAudioIsEditable) )
-            {
-                pSettings->pTransitionList[i]->AudioTransitionType
-                    = M4VSS3GPP_kAudioTransitionType_None;
-            }
-        }
-    }
-
-    /**
-    * We add a transition of duration 0 at the end of the last clip.
-    * It will suppress a whole bunch a test latter in the processing... */
-    pC->pTransitionList = (M4VSS3GPP_TransitionSettings
-        *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings)
-        * (pC->uiClipNumber), M4VSS3GPP, (M4OSA_Char *)"pC->pTransitionList");
-
-    if( M4OSA_NULL == pC->pTransitionList )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_editOpen: unable to allocate pC->Settings.pTransitionList,\
-            returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    /**< copy transition settings */
-    for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
-    {
-        memcpy((void *) &(pC->pTransitionList[i]),
-            (void *)pSettings->pTransitionList[i],
-            sizeof(M4VSS3GPP_TransitionSettings));
-    }
-
-    /**< We fill the last "dummy" transition */
-    pC->pTransitionList[pC->uiClipNumber - 1].uiTransitionDuration = 0;
-    pC->pTransitionList[pC->uiClipNumber
-        - 1].VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
-    pC->pTransitionList[pC->uiClipNumber
-        - 1].AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
-
-    /**
-    * Avoid weird clip settings */
-    for ( i = 0; i < pSettings->uiClipNumber; i++ )
-    {
-        if (pC->pClipList[i].FileType !=M4VIDEOEDITING_kFileType_ARGB8888) {
-            err = M4VSS3GPP_intClipSettingsSanityCheck(&pC->pClipList[i]);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
-                    err);
-                return err;
-            }
-        }
-    }
-
-    for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
-    {
-        if (pC->pTransitionList[i].uiTransitionDuration != 0) {
-             if (pC->pClipList[i].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
-                 pC->pClipList[i].uiBeginCutTime = 0;
-                 pC->pClipList[i].uiEndCutTime =
-                     pC->pTransitionList[i].uiTransitionDuration;
-             }
-
-             if (pC->pClipList[i+1].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
-                 pC->pClipList[i+1].uiBeginCutTime = 0;
-                 pC->pClipList[i+1].uiEndCutTime =
-                     pC->pTransitionList[i].uiTransitionDuration;
-             }
-        } else {
-
-             if (pC->pClipList[i].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
-                 pC->pClipList[i].uiEndCutTime =
-                     pC->pClipList[i].uiEndCutTime - pC->pClipList[i].uiBeginCutTime;
-                 pC->pClipList[i].uiBeginCutTime = 0;
-             }
-
-             if (pC->pClipList[i+1].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
-                 pC->pClipList[i+1].uiEndCutTime =
-                     pC->pClipList[i+1].uiEndCutTime - pC->pClipList[i+1].uiBeginCutTime;
-                 pC->pClipList[i+1].uiBeginCutTime = 0;
-             }
-
-        }
-
-        /**
-        * Maximum transition duration between clip n and clip n+1 is the duration
-        * of the shortest clip */
-        if( 0 == pC->pClipList[i].uiEndCutTime )
-        {
-            uiC1duration = pC->pClipList[i].ClipProperties.uiClipVideoDuration;
-        }
-        else
-        {
-            /**< duration of clip n is the end cut time */
-            uiC1duration = pC->pClipList[i].uiEndCutTime;
-        }
-
-        /**< Substract begin cut */
-        uiC1duration -= pC->pClipList[i].uiBeginCutTime;
-
-        /**< Check that the transition is shorter than clip n */
-        if( pC->pTransitionList[i].uiTransitionDuration > uiC1duration )
-        {
-            pC->pTransitionList[i].uiTransitionDuration = uiC1duration - 1;
-        }
-
-        if( 0 == pC->pClipList[i + 1].uiEndCutTime )
-        {
-            uiC2duration =
-                pC->pClipList[i + 1].ClipProperties.uiClipVideoDuration;
-        }
-        else
-        {
-            /**< duration of clip n+1 is the end cut time */
-            uiC2duration = pC->pClipList[i + 1].uiEndCutTime;
-        }
-
-        /**< Substract begin cut */
-        uiC2duration -= pC->pClipList[i + 1].uiBeginCutTime;
-
-        /**< Check that the transition is shorter than clip n+1 */
-        if( pC->pTransitionList[i].uiTransitionDuration > uiC2duration )
-        {
-            pC->pTransitionList[i].uiTransitionDuration = uiC2duration - 1;
-        }
-
-        /**
-        * Avoid weird transition settings */
-        err =
-            M4VSS3GPP_intTransitionSettingsSanityCheck(&pC->pTransitionList[i]);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Check that two transitions are not overlapping
-          (no overlapping possible for first clip) */
-        if( i > 0 )
-        {
-            /**
-            * There is a transition overlap if the sum of the duration of
-              two consecutive transitions
-            * is higher than the duration of the clip in-between. */
-            if( ( pC->pTransitionList[i - 1].uiTransitionDuration
-                + pC->pTransitionList[i].uiTransitionDuration) >= uiC1duration )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editOpen: Overlapping transitions on clip %d,\
-                    returning M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS",
-                    i);
-                return M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS;
-            }
-        }
-    }
-
-    /**
-    * Output clip duration */
-    for ( i = 0; i < pC->uiClipNumber; i++ )
-    {
-        /**
-        * Compute the sum of the clip duration */
-        if( 0 == pC->pClipList[i].uiEndCutTime )
-        {
-            pC->ewc.iOutputDuration +=
-                pC->
-                pClipList[
-                    i].ClipProperties.
-                        uiClipVideoDuration; /* Only video track duration is important to
-                                             avoid deviation if audio track is longer */
-        }
-        else
-        {
-            pC->ewc.iOutputDuration +=
-                pC->pClipList[i].uiEndCutTime; /**< Add end cut */
-        }
-
-        pC->ewc.iOutputDuration -=
-            pC->pClipList[i].uiBeginCutTime; /**< Remove begin cut */
-
-        /**
-        * Remove the duration of the transition (it is counted twice) */
-        pC->ewc.iOutputDuration -= pC->pTransitionList[i].uiTransitionDuration;
-    }
-
-    /* Get video properties from output properties */
-
-    /* Get output width and height */
-    switch(pC->xVSS.outputVideoSize) {
-        case M4VIDEOEDITING_kSQCIF:
-            pC->ewc.uiVideoWidth = 128;
-            pC->ewc.uiVideoHeight = 96;
-            break;
-        case M4VIDEOEDITING_kQQVGA:
-            pC->ewc.uiVideoWidth = 160;
-            pC->ewc.uiVideoHeight = 120;
-            break;
-        case M4VIDEOEDITING_kQCIF:
-            pC->ewc.uiVideoWidth = 176;
-            pC->ewc.uiVideoHeight = 144;
-            break;
-        case M4VIDEOEDITING_kQVGA:
-            pC->ewc.uiVideoWidth = 320;
-            pC->ewc.uiVideoHeight = 240;
-            break;
-        case M4VIDEOEDITING_kCIF:
-            pC->ewc.uiVideoWidth = 352;
-            pC->ewc.uiVideoHeight = 288;
-            break;
-        case M4VIDEOEDITING_kVGA:
-            pC->ewc.uiVideoWidth = 640;
-            pC->ewc.uiVideoHeight = 480;
-            break;
-            /* +PR LV5807 */
-        case M4VIDEOEDITING_kWVGA:
-            pC->ewc.uiVideoWidth = 800;
-            pC->ewc.uiVideoHeight = 480;
-            break;
-        case M4VIDEOEDITING_kNTSC:
-            pC->ewc.uiVideoWidth = 720;
-            pC->ewc.uiVideoHeight = 480;
-            break;
-            /* -PR LV5807 */
-            /* +CR Google */
-        case M4VIDEOEDITING_k640_360:
-            pC->ewc.uiVideoWidth = 640;
-            pC->ewc.uiVideoHeight = 360;
-            break;
-
-        case M4VIDEOEDITING_k854_480:
-            pC->ewc.uiVideoWidth = M4ENCODER_854_480_Width;
-            pC->ewc.uiVideoHeight = 480;
-            break;
-
-        case M4VIDEOEDITING_k1280_720:
-            pC->ewc.uiVideoWidth = 1280;
-            pC->ewc.uiVideoHeight = 720;
-            break;
-        case M4VIDEOEDITING_k1080_720:
-            pC->ewc.uiVideoWidth = M4ENCODER_1080_720_Width;
-
-            pC->ewc.uiVideoHeight = 720;
-            break;
-        case M4VIDEOEDITING_k960_720:
-            pC->ewc.uiVideoWidth = 960;
-            pC->ewc.uiVideoHeight = 720;
-            break;
-        case M4VIDEOEDITING_k1920_1080:
-            pC->ewc.uiVideoWidth = 1920;
-            pC->ewc.uiVideoHeight = 1088; // need to be multiples of 16
-            break;
-
-        default: /* If output video size is not given, we take QCIF size */
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editOpen: no output video size given, default to QCIF!");
-            pC->ewc.uiVideoWidth = 176;
-            pC->ewc.uiVideoHeight = 144;
-            pC->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
-            break;
-    }
-
-    pC->ewc.uiVideoTimeScale        = 30;
-    pC->ewc.bVideoDataPartitioning  = 0;
-    /* Set output video profile and level */
-    pC->ewc.outputVideoProfile = pC->xVSS.outputVideoProfile;
-    pC->ewc.outputVideoLevel = pC->xVSS.outputVideoLevel;
-
-    switch(pC->xVSS.outputVideoFormat) {
-        case M4VIDEOEDITING_kH263:
-            pC->ewc.VideoStreamType = M4SYS_kH263;
-            break;
-        case M4VIDEOEDITING_kMPEG4:
-            pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
-            break;
-        case M4VIDEOEDITING_kH264:
-            pC->ewc.VideoStreamType = M4SYS_kH264;
-            break;
-        default:
-            pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
-            break;
-    }
-
-    /**
-    * Copy the audio properties of the master clip to the output properties */
-    pC->ewc.uiNbChannels =
-        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiNbChannels;
-    pC->ewc.uiAudioBitrate =
-        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiAudioBitrate;
-    pC->ewc.uiSamplingFrequency = pC->pClipList[pSettings->
-        uiMasterClip].ClipProperties.uiSamplingFrequency;
-    pC->ewc.uiSilencePcmSize =
-        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiDecodedPcmSize;
-    pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
-
-    switch( pC->pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
-    {
-        case M4VIDEOEDITING_kAMR_NB:
-            pC->ewc.AudioStreamType = M4SYS_kAMR;
-            pC->ewc.pSilenceFrameData =
-                (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
-            pC->ewc.uiSilenceFrameSize =
-                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
-            pC->ewc.iSilenceFrameDuration =
-                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
-            pC->bSupportSilence = M4OSA_TRUE;
-            break;
-
-        case M4VIDEOEDITING_kAAC:
-        case M4VIDEOEDITING_kAACplus:
-        case M4VIDEOEDITING_keAACplus:
-            pC->ewc.AudioStreamType = M4SYS_kAAC;
-
-            if( pC->ewc.uiNbChannels == 1 )
-            {
-                pC->ewc.pSilenceFrameData =
-                    (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
-                pC->ewc.uiSilenceFrameSize = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
-                pC->bSupportSilence = M4OSA_TRUE;
-            }
-            else
-            {
-                pC->ewc.pSilenceFrameData =
-                    (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
-                pC->ewc.uiSilenceFrameSize =
-                    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
-                pC->bSupportSilence = M4OSA_TRUE;
-            }
-            pC->ewc.iSilenceFrameDuration =
-                1024; /* AAC is always 1024/Freq sample duration */
-            break;
-
-        case M4VIDEOEDITING_kMP3:
-            pC->ewc.AudioStreamType = M4SYS_kMP3;
-            pC->ewc.pSilenceFrameData = M4OSA_NULL;
-            pC->ewc.uiSilenceFrameSize = 0;
-            pC->ewc.iSilenceFrameDuration = 0;
-            /* Special case, mp3 core reader return a time in ms */
-            pC->ewc.scale_audio = 1.0;
-            break;
-
-        case M4VIDEOEDITING_kEVRC:
-            pC->ewc.AudioStreamType = M4SYS_kEVRC;
-            pC->ewc.pSilenceFrameData = M4OSA_NULL;
-            pC->ewc.uiSilenceFrameSize = 0;
-            pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
-                                             (makes it easier to factorize amr and evrc code) */
-            break;
-
-        default:
-            pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
-            break;
-    }
-
-    for (i=0; i<pC->uiClipNumber; i++) {
-        if (pC->pClipList[i].bTranscodingRequired == M4OSA_FALSE) {
-            /** If not transcoded in Analysis phase, check
-             * if transcoding required now
-             */
-            if ((pC->pClipList[i].ClipProperties.VideoStreamType !=
-                  pC->xVSS.outputVideoFormat)||
-                  (pC->pClipList[i].ClipProperties.uiVideoWidth !=
-                   pC->ewc.uiVideoWidth) ||
-                  (pC->pClipList[i].ClipProperties.uiVideoHeight !=
-                   pC->ewc.uiVideoHeight) ||
-                  (pC->pClipList[i].ClipProperties.VideoStreamType ==
-                   M4VIDEOEDITING_kH264) ||
-                  (pC->pClipList[i].ClipProperties.VideoStreamType ==
-                   M4VIDEOEDITING_kMPEG4 &&
-                   pC->pClipList[i].ClipProperties.uiVideoTimeScale !=
-                    pC->ewc.uiVideoTimeScale)) {
-                pC->pClipList[i].bTranscodingRequired = M4OSA_TRUE;
-            }
-        } else {
-            /** If bTranscodingRequired is true, it means the clip has
-             * been transcoded in Analysis phase.
-             */
-            pC->pClipList[i].bTranscodingRequired = M4OSA_FALSE;
-        }
-    }
-    /**
-    * We produce a 3gpp file, unless it is mp3 */
-    if( M4VIDEOEDITING_kMP3 == pC->
-        pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
-        outputFileType = M4VIDEOEDITING_kFileType_MP3;
-    else
-        outputFileType = M4VIDEOEDITING_kFileType_3GPP;
-
-    /**
-    * Beware, a null duration would lead to a divide by zero error (better safe than sorry...) */
-    if( 0 == pC->ewc.iOutputDuration )
-    {
-        pC->ewc.iOutputDuration = 1;
-    }
-
-    /**
-    * Open first clip */
-    pC->uiCurrentClip = 0;
-
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    pC->ewc.dInputVidCts  = 0.0;
-    pC->ewc.dOutputVidCts = 0.0;
-    pC->ewc.dATo = 0.0;
-
-    err = M4VSS3GPP_intSwitchToNextClip(pC);
-    /* RC: to know when a file has been processed */
-    if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editOpen: M4VSS3GPP_intSwitchToNextClip() returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Do the video stuff in 3GPP Audio/Video case */
-    if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
-    {
-        /**
-        * Compute the Decoder Specific Info for the output video and audio streams */
-        err = M4VSS3GPP_intComputeOutputVideoAndAudioDsi(pC,
-            pSettings->uiMasterClip);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editOpen: M4VSS3GPP_intComputeOutputVideoAndAudioDsi() returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Compute the time increment for the transition file */
-        switch( pSettings->videoFrameRate )
-        {
-            case M4VIDEOEDITING_k5_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 5.0;
-                break;
-
-            case M4VIDEOEDITING_k7_5_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 7.5;
-                break;
-
-            case M4VIDEOEDITING_k10_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 10.0;
-                break;
-
-            case M4VIDEOEDITING_k12_5_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 12.5;
-                break;
-
-            case M4VIDEOEDITING_k15_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 15.0;
-                break;
-
-            case M4VIDEOEDITING_k20_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 20.0;
-                break;
-
-            case M4VIDEOEDITING_k25_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 25.0;
-                break;
-
-            case M4VIDEOEDITING_k30_FPS:
-                pC->dOutputFrameDuration = 1000.0 / 30.0;
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editOpen(): invalid videoFrameRate (0x%x),\
-                    returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE",
-                    pSettings->videoFrameRate);
-                return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
-        }
-
-        if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType )
-        {
-            M4OSA_UInt32 uiAlpha;
-            /**
-            * MPEG-4 case.
-            * Time scale of the transition encoder must be the same than the
-            * timescale of the input files.
-            * So the frame duration must be compatible with this time scale,
-            * but without beeing too short.
-            * For that, we must compute alpha (integer) so that:
-            *             (alpha x 1000)/EncoderTimeScale > MinFrameDuration
-            **/
-
-            uiAlpha = (M4OSA_UInt32)(( pC->dOutputFrameDuration
-                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
-
-            if( uiAlpha > 0 )
-            {
-                pC->dOutputFrameDuration =
-                    ( uiAlpha * 1000.0) / pC->ewc.uiVideoTimeScale;
-            }
-        }
-        else if( M4SYS_kH263 == pC->ewc.VideoStreamType )
-        {
-            switch( pSettings->videoFrameRate )
-            {
-                case M4VIDEOEDITING_k12_5_FPS:
-                case M4VIDEOEDITING_k20_FPS:
-                case M4VIDEOEDITING_k25_FPS:
-                    M4OSA_TRACE1_0(
-                        "M4VSS3GPP_editOpen(): invalid videoFrameRate for H263,\
-                        returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
-                    return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
-               default:
-                  break;
-            }
-        }
-    }
-
-    /**
-    * Create the MP3 output file */
-    if( M4VIDEOEDITING_kFileType_MP3 == outputFileType )
-    {
-        M4READER_Buffer mp3tagBuffer;
-        err = M4VSS3GPP_intCreateMP3OutputFile(pC, pSettings->pOutputFile);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editOpen: M4VSS3GPP_intCreateMP3OutputFile returns 0x%x",
-                err);
-            return err;
-        }
-
-        /* The ID3v2 tag could be at any place in the mp3 file                             */
-        /* The mp3 reader only checks few bytes in the beginning of
-           stream to look for a ID3v2 tag  */
-        /* It means that if the ID3v2 tag is not at the beginning of the file the reader do
-        as there is no these metadata */
-
-        /* Retrieve the data of the ID3v2 Tag */
-        err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
-            pC->pC1->pReaderContext, M4READER_kOptionID_Mp3Id3v2Tag,
-            (M4OSA_DataOption) &mp3tagBuffer);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1("M4VSS3GPP_editOpen: M4MP3R_getOption returns 0x%x",
-                err);
-            return err;
-        }
-
-        /* Write the data of the ID3v2 Tag in the output file */
-        if( 0 != mp3tagBuffer.m_uiBufferSize )
-        {
-            err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
-                (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
-
-            /**
-            * Free before the error checking anyway */
-            free(mp3tagBuffer.m_pData);
-
-            /**
-            * Error checking */
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editOpen: WriteData(ID3v2Tag) returns 0x%x",
-                    err);
-                return err;
-            }
-
-            mp3tagBuffer.m_uiBufferSize = 0;
-            mp3tagBuffer.m_pData = M4OSA_NULL;
-        }
-    }
-    /**
-    * Create the 3GPP output file */
-    else if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
-    {
-        pC->ewc.uiVideoBitrate = pSettings->xVSS.outputVideoBitrate;
-
-        /**
-        * 11/12/2008 CR3283 MMS use case in VideoArtist: Set max output file size if needed */
-        if( pC->bIsMMS == M4OSA_TRUE )
-        {
-            err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
-                pC->pOsaFileWritPtr, pSettings->pOutputFile,
-                pC->pOsaFileReadPtr, pSettings->pTemporaryFile,
-                pSettings->xVSS.outputFileSize);
-        }
-        else
-        {
-            err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
-                pC->pOsaFileWritPtr, pSettings->pOutputFile,
-                pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
-        }
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editOpen: M4VSS3GPP_intCreate3GPPOutputFile returns 0x%x",
-                err);
-            return err;
-        }
-    }
-    /**
-    * Default error case */
-    else
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editOpen: invalid outputFileType = 0x%x,\
-            returning M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR",
-            outputFileType);
-        return
-            M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR; /**< this is an internal error code
-                                                  unknown to the user */
-    }
-
-    /**
-    * Initialize state */
-    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
-    {
-        /**
-        * In the MP3 case we use a special audio state */
-        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
-    }
-    else
-    {
-        /**
-        * We start with the video processing */
-        pC->State = M4VSS3GPP_kEditState_VIDEO;
-    }
-
-    /**
-    * Initialize state.
-    * The first clip is independant to the "virtual previous clips",
-    * so it's like if we where in Read/Write mode before it. */
-    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
-    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_editOpen(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editStep()
- * @brief    Perform one step of editing.
- * @note
- * @param     pContext           (IN) VSS 3GPP edit context
- * @param     pProgress          (OUT) Progress percentage (0 to 100) of the editing operation
- * @return    M4NO_ERROR:        No error
- * @return    M4ERR_PARAMETER:   pContext is M4OSA_NULL (debug only)
- * @return    M4ERR_STATE:       VSS 3GPP is not in an appropriate state for this
- *                               function to be called
- * @return    M4VSS3GPP_WAR_EDITING_DONE: Edition is done, user should now call
- *            M4VSS3GPP_editClose()
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editStep( M4VSS3GPP_EditContext pContext,
-                             M4OSA_UInt8 *pProgress )
-{
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-    M4OSA_UInt32 uiProgressAudio, uiProgressVideo, uiProgress;
-    M4OSA_ERR err;
-
-    M4OSA_TRACE3_1("M4VSS3GPP_editStep called with pContext=0x%x", pContext);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_editStep: pContext is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
-        "M4VSS3GPP_editStep: pProgress is M4OSA_NULL");
-
-    /**
-    * Check state automaton and select correct processing */
-    switch( pC->State )
-    {
-        case M4VSS3GPP_kEditState_VIDEO:
-            err = M4VSS3GPP_intEditStepVideo(pC);
-            break;
-
-        case M4VSS3GPP_kEditState_AUDIO:
-            err = M4VSS3GPP_intEditStepAudio(pC);
-            break;
-
-        case M4VSS3GPP_kEditState_MP3:
-            err = M4VSS3GPP_intEditStepMP3(pC);
-            break;
-
-        case M4VSS3GPP_kEditState_MP3_JUMP:
-            err = M4VSS3GPP_intEditJumpMP3(pC);
-            break;
-
-        default:
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_editStep(): invalid internal state (0x%x), returning M4ERR_STATE");
-            return M4ERR_STATE;
-    }
-
-    /**
-    * Compute progress.
-    * We do the computing with 32bits precision because in some (very) extreme case, we may get
-    * values higher than 256 (...) */
-    uiProgressAudio =
-        ( (M4OSA_UInt32)(pC->ewc.dATo * 100)) / pC->ewc.iOutputDuration;
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    uiProgressVideo = ((M4OSA_UInt32)(pC->ewc.dInputVidCts * 100)) / pC->ewc.iOutputDuration;
-
-    uiProgress = uiProgressAudio + uiProgressVideo;
-
-    if( ( pC->ewc.AudioStreamType != M4SYS_kAudioUnknown)
-        && (pC->ewc.VideoStreamType != M4SYS_kVideoUnknown) )
-        uiProgress /= 2;
-
-    /**
-    * Sanity check */
-    if( uiProgress > 100 )
-    {
-        *pProgress = 100;
-    }
-    else
-    {
-        *pProgress = (M4OSA_UInt8)uiProgress;
-    }
-
-    /**
-    * Return the error */
-    M4OSA_TRACE3_1("M4VSS3GPP_editStep(): returning 0x%x", err);
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editClose()
- * @brief    Finish the VSS edit operation.
- * @note    The output 3GPP file is ready to be played after this call
- * @param    pContext           (IN) VSS edit context
- * @return    M4NO_ERROR:       No error
- * @return    M4ERR_PARAMETER:  pContext is M4OSA_NULL (debug only)
- * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editClose( M4VSS3GPP_EditContext pContext )
-{
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-    M4OSA_ERR err;
-    M4OSA_ERR returnedError = M4NO_ERROR;
-    M4OSA_UInt32 lastCTS;
-
-    M4OSA_TRACE3_1("M4VSS3GPP_editClose called with pContext=0x%x", pContext);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4VSS3GPP_editClose: pContext is M4OSA_NULL");
-
-    /**
-    * Check state automaton.
-    * In "theory", we should not authorize closing if we are in CREATED state.
-    * But in practice, in case the opening failed, it may have been partially done.
-    * In that case we have to free some opened ressources by calling Close. */
-    if( M4VSS3GPP_kEditState_CLOSED == pC->State )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editClose: Wrong state (0x%x), returning M4ERR_STATE",
-            pC->State);
-        return M4ERR_STATE;
-    }
-
-    /**
-    * There may be an encoder to destroy */
-    err = M4VSS3GPP_intDestroyVideoEncoder(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_editClose: M4VSS3GPP_editDestroyVideoEncoder() returns 0x%x!",
-            err);
-        /**< We do not return the error here because we still have stuff to free */
-        returnedError = err;
-    }
-
-    /**
-    * Close the output file */
-    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
-    {
-        /**
-        * MP3 case */
-        if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
-        {
-            err = pC->pOsaFileWritPtr->closeWrite(pC->ewc.p3gpWriterContext);
-            pC->ewc.p3gpWriterContext = M4OSA_NULL;
-        }
-    }
-    else
-    {
-        /**
-        * Close the output 3GPP clip, if it has been opened */
-        if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
-        {
-            /* Update last Video CTS */
-            lastCTS = pC->ewc.iOutputDuration;
-
-            err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
-                pC->ewc.p3gpWriterContext,
-                (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editClose: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
-                    err);
-            }
-
-            err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
-                pC->ewc.p3gpWriterContext);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_editClose: pFctCloseWrite(OUT) returns 0x%x!",
-                    err);
-                /**< We do not return the error here because we still have stuff to free */
-                if( M4NO_ERROR
-                    == returnedError ) /**< we return the first error that happened */
-                {
-                    returnedError = err;
-                }
-            }
-            pC->ewc.p3gpWriterContext = M4OSA_NULL;
-        }
-    }
-
-    /**
-    * Free the output video DSI, if it has been created */
-    if( M4OSA_NULL != pC->ewc.pVideoOutputDsi )
-    {
-        free(pC->ewc.pVideoOutputDsi);
-        pC->ewc.pVideoOutputDsi = M4OSA_NULL;
-    }
-
-    /**
-    * Free the output audio DSI, if it has been created */
-    if( M4OSA_NULL != pC->ewc.pAudioOutputDsi )
-    {
-        free(pC->ewc.pAudioOutputDsi);
-        pC->ewc.pAudioOutputDsi = M4OSA_NULL;
-    }
-
-    /**
-    * Close clip1, if needed */
-    if( M4OSA_NULL != pC->pC1 )
-    {
-        err = M4VSS3GPP_intClipCleanUp(pC->pC1);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
-                err);
-            /**< We do not return the error here because we still have stuff to free */
-            if( M4NO_ERROR
-                == returnedError ) /**< we return the first error that happened */
-            {
-                returnedError = err;
-            }
-        }
-        pC->pC1 = M4OSA_NULL;
-    }
-
-    /**
-    * Close clip2, if needed */
-    if( M4OSA_NULL != pC->pC2 )
-    {
-        err = M4VSS3GPP_intClipCleanUp(pC->pC2);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C2) returns 0x%x!",
-                err);
-            /**< We do not return the error here because we still have stuff to free */
-            if( M4NO_ERROR
-                == returnedError ) /**< we return the first error that happened */
-            {
-                returnedError = err;
-            }
-        }
-        pC->pC2 = M4OSA_NULL;
-    }
-
-    /**
-    * Free the temporary YUV planes */
-    if( M4OSA_NULL != pC->yuv1[0].pac_data )
-    {
-        free(pC->yuv1[0].pac_data);
-        pC->yuv1[0].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv1[1].pac_data )
-    {
-        free(pC->yuv1[1].pac_data);
-        pC->yuv1[1].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv1[2].pac_data )
-    {
-        free(pC->yuv1[2].pac_data);
-        pC->yuv1[2].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv2[0].pac_data )
-    {
-        free(pC->yuv2[0].pac_data);
-        pC->yuv2[0].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv2[1].pac_data )
-    {
-        free(pC->yuv2[1].pac_data);
-        pC->yuv2[1].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv2[2].pac_data )
-    {
-        free(pC->yuv2[2].pac_data);
-        pC->yuv2[2].pac_data = M4OSA_NULL;
-    }
-
-    /* RC */
-    if( M4OSA_NULL != pC->yuv3[0].pac_data )
-    {
-        free(pC->yuv3[0].pac_data);
-        pC->yuv3[0].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv3[1].pac_data )
-    {
-        free(pC->yuv3[1].pac_data);
-        pC->yuv3[1].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv3[2].pac_data )
-    {
-        free(pC->yuv3[2].pac_data);
-        pC->yuv3[2].pac_data = M4OSA_NULL;
-    }
-
-    /* RC */
-    if( M4OSA_NULL != pC->yuv4[0].pac_data )
-    {
-        free(pC->yuv4[0].pac_data);
-        pC->yuv4[0].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv4[1].pac_data )
-    {
-        free(pC->yuv4[1].pac_data);
-        pC->yuv4[1].pac_data = M4OSA_NULL;
-    }
-
-    if( M4OSA_NULL != pC->yuv4[2].pac_data )
-    {
-        free(pC->yuv4[2].pac_data);
-        pC->yuv4[2].pac_data = M4OSA_NULL;
-    }
-
-    /**
-    * RC Free effects list */
-    if( pC->pEffectsList != M4OSA_NULL )
-    {
-        free(pC->pEffectsList);
-        pC->pEffectsList = M4OSA_NULL;
-    }
-
-    /**
-    * RC Free active effects list */
-    if( pC->pActiveEffectsList != M4OSA_NULL )
-    {
-        free(pC->pActiveEffectsList);
-        pC->pActiveEffectsList = M4OSA_NULL;
-    }
-    /**
-     *  Free active effects list */
-    if(pC->pActiveEffectsList1 != M4OSA_NULL)
-    {
-        free(pC->pActiveEffectsList1);
-        pC->pActiveEffectsList1 = M4OSA_NULL;
-    }
-    if(pC->m_air_context != M4OSA_NULL) {
-        free(pC->m_air_context);
-        pC->m_air_context = M4OSA_NULL;
-    }
-    /**
-    * Update state automaton */
-    pC->State = M4VSS3GPP_kEditState_CLOSED;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_1("M4VSS3GPP_editClose(): returning 0x%x", returnedError);
-    return returnedError;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_editCleanUp()
- * @brief    Free all resources used by the VSS edit operation.
- * @note    The context is no more valid after this call
- * @param    pContext            (IN) VSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_editCleanUp( M4VSS3GPP_EditContext pContext )
-{
-    M4OSA_ERR err;
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-
-    M4OSA_TRACE3_1("M4VSS3GPP_editCleanUp called with pContext=0x%x", pContext);
-
-    /**
-    *    Check input parameter */
-    if( M4OSA_NULL == pContext )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_editCleanUp(): pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
-        return M4ERR_PARAMETER;
-    }
-
-    /**
-    * Close, if needed.
-    * In "theory", we should not close if we are in CREATED state.
-    * But in practice, in case the opening failed, it may have been partially done.
-    * In that case we have to free some opened ressources by calling Close. */
-    if( M4VSS3GPP_kEditState_CLOSED != pC->State )
-    {
-        M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): calling M4VSS3GPP_editClose");
-        err = M4VSS3GPP_editClose(pC);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editCleanUp(): M4VSS3GPP_editClose returns 0x%x",
-                err);
-        }
-    }
-
-    /**
-    * Free the video encoder dummy AU */
-    if( M4OSA_NULL != pC->ewc.pDummyAuBuffer )
-    {
-        free(pC->ewc.pDummyAuBuffer);
-        pC->ewc.pDummyAuBuffer = M4OSA_NULL;
-    }
-
-    /**
-    * Free the Audio encoder context */
-    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
-    {
-        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
-            pC->ewc.pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
-            pC->ewc.pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Free the shells interfaces */
-    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
-    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
-    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
-    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
-
-    /**
-    * Free the settings copied in the internal context */
-    M4VSS3GPP_intFreeSettingsList(pC);
-
-    /**
-    * Finally, Free context */
-    free(pC);
-    pC = M4OSA_NULL;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-#ifdef WIN32
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
- * @brief    Return a string describing the given error code
- * @note    The input string must be already allocated (and long enough!)
- * @param    err                (IN) Error code to get the description from
- * @param    sMessage        (IN/OUT) Allocated string in which the description will be copied
- * @return    M4NO_ERROR:        Input error is from the VSS3GPP module
- * @return    M4ERR_PARAMETER:Input error is not from the VSS3GPP module
- ******************************************************************************
- */
-
-M4OSA_ERR M4VSS3GPP_GetErrorMessage( M4OSA_ERR err, M4OSA_Char *sMessage )
-{
-    switch( err )
-    {
-        case M4VSS3GPP_WAR_EDITING_DONE:
-            strcpy(sMessage, "M4VSS3GPP_WAR_EDITING_DONE");
-            break;
-
-        case M4VSS3GPP_WAR_END_OF_AUDIO_MIXING:
-            strcpy(sMessage, "M4VSS3GPP_WAR_END_OF_AUDIO_MIXING");
-            break;
-
-        case M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE:
-            strcpy(sMessage, "M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_FILE_TYPE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_FILE_TYPE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_EFFECT_KIND:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_EFFECT_KIND");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
-            break;
-
-        case M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL");
-            break;
-
-        case M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL");
-            break;
-
-        case M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION:
-            strcpy(sMessage, "M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION");
-            break;
-
-        case M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT");
-            break;
-
-        case M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS:
-            strcpy(sMessage, "M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_3GPP_FILE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_3GPP_FILE");
-            break;
-
-        case M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT");
-            break;
-
-        case M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT");
-            break;
-
-        case M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED:
-            strcpy(sMessage, "M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
-            break;
-
-        case M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE");
-            break;
-
-        case M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE");
-            break;
-
-        case M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU");
-            break;
-
-        case M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR:
-            strcpy(sMessage, "M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE:
-            strcpy(sMessage,
-                "M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
-            break;
-
-        case M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE:
-            strcpy(sMessage,
-                "M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE");
-            break;
-
-        case M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
-            break;
-
-        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT");
-            break;
-
-        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE");
-            break;
-
-        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE");
-            break;
-
-        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING:
-            strcpy(sMessage,
-                "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING");
-            break;
-
-        case M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY:
-            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
-            break;
-
-        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE:
-            strcpy(sMessage, "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE");
-            break;
-
-        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS:
-            strcpy(sMessage, "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS");
-            break;
-
-        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY:
-            strcpy(sMessage,
-                "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY");
-            break;
-
-        case M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
-            break;
-
-        case M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO:
-            strcpy(sMessage, "M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
-            break;
-
-        case M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION:
-            strcpy(sMessage, "M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
-            break;
-
-        case M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT:
-            strcpy(sMessage, "M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT");
-            break;
-
-        case M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM:
-            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
-            break;
-
-        case M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED:
-            strcpy(sMessage, "M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED");
-            break;
-
-        case M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK:
-            strcpy(sMessage,
-                "M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK");
-            break;
-
-        case M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED:
-            strcpy(sMessage, "M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED");
-            break;
-
-        case M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP:
-            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
-            break;
-
-        case M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP:
-            strcpy(sMessage, "M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP");
-            break;
-
-        case M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED:
-            strcpy(sMessage, "M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
-            break;
-
-        case M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE:
-            strcpy(sMessage, "M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE");
-            break;
-
-        default: /**< Not a VSS3GPP error */
-            strcpy(sMessage, "");
-            return M4ERR_PARAMETER;
-    }
-    return M4NO_ERROR;
-}
-
-#endif /* WIN32 */
-
-/********************************************************/
-/********************************************************/
-/********************************************************/
-/****************   STATIC FUNCTIONS   ******************/
-/********************************************************/
-/********************************************************/
-/********************************************************/
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck()
- * @brief    Simplify the given clip settings
- * @note    This function may modify the given structure
- * @param   pClip    (IN/OUT) Clip settings
- * @return    M4NO_ERROR:            No error
- * @return    M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
- ******************************************************************************
- */
-
-static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
-    M4VSS3GPP_ClipSettings *pClip )
-{
-    M4OSA_UInt8 uiFx;
-    M4OSA_UInt32
-        uiClipActualDuration; /**< the clip duration once the cuts are done */
-    M4OSA_UInt32 uiDuration;
-    M4VSS3GPP_EffectSettings *pFx;
-
-    /**
-    * If begin cut is too far, return an error */
-    uiDuration = pClip->ClipProperties.uiClipDuration;
-
-    if( pClip->uiBeginCutTime > uiDuration )
-    {
-        M4OSA_TRACE1_2(
-            "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
-            returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
-            pClip->uiBeginCutTime, uiDuration);
-        return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
-    }
-
-    /**
-    * If end cut is too far, set to zero (it means no end cut) */
-    if( pClip->uiEndCutTime > uiDuration )
-    {
-        pClip->uiEndCutTime = 0;
-    }
-
-    /**
-    * Compute actual clip duration (once cuts are done) */
-    if( 0 == pClip->uiEndCutTime )
-    {
-        /**
-        * No end cut */
-        uiClipActualDuration = uiDuration - pClip->uiBeginCutTime;
-    }
-    else
-    {
-        if( pClip->uiBeginCutTime >= pClip->uiEndCutTime )
-        {
-            M4OSA_TRACE1_2(
-                "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
-                returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT",
-                pClip->uiBeginCutTime, pClip->uiEndCutTime);
-            return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT;
-        }
-        uiClipActualDuration = pClip->uiEndCutTime - pClip->uiBeginCutTime;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck()
- * @brief    Simplify the given transition settings
- * @note     This function may modify the given structure
- * @param    pTransition    (IN/OUT) Transition settings
- * @return    M4NO_ERROR:            No error
- * @return    M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL:
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
-    M4VSS3GPP_TransitionSettings *pTransition )
-{
-    /**
-    * No transition */
-    if( 0 == pTransition->uiTransitionDuration )
-    {
-        pTransition->VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
-        pTransition->AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
-    }
-    else if( ( M4VSS3GPP_kVideoTransitionType_None
-        == pTransition->VideoTransitionType)
-        && (M4VSS3GPP_kAudioTransitionType_None
-        == pTransition->AudioTransitionType) )
-    {
-        pTransition->uiTransitionDuration = 0;
-    }
-
-    /**
-    * Check external transition function is set */
-    if( ( pTransition->VideoTransitionType
-        >= M4VSS3GPP_kVideoTransitionType_External)
-        && (M4OSA_NULL == pTransition->ExtVideoTransitionFct) )
-    {
-        return M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL;
-    }
-
-    /**
-    * Set minimal transition duration */
-    if( ( pTransition->uiTransitionDuration > 0)
-        && (pTransition->uiTransitionDuration
-        < M4VSS3GPP_MINIMAL_TRANSITION_DURATION) )
-    {
-        pTransition->uiTransitionDuration =
-            M4VSS3GPP_MINIMAL_TRANSITION_DURATION;
-    }
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intFreeSettingsList()
- * @brief    Free the settings copied in the internal context
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
-    M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_UInt32 i;
-
-    /**
-    * Free the settings list */
-    if( M4OSA_NULL != pC->pClipList )
-    {
-        for ( i = 0; i < pC->uiClipNumber; i++ )
-        {
-            M4VSS3GPP_editFreeClipSettings(&(pC->pClipList[i]));
-        }
-
-        free(pC->pClipList);
-        pC->pClipList = M4OSA_NULL;
-    }
-
-    /**
-    * Free the transition list */
-    if( M4OSA_NULL != pC->pTransitionList )
-    {
-        free(pC->pTransitionList);
-        pC->pTransitionList = M4OSA_NULL;
-    }
-}
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreateMP3OutputFile()
- * @brief        Creates and prepare the output MP file
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
-                                 M4OSA_Void *pOutputFile )
-{
-    M4OSA_ERR err;
-
-    err =
-        pC->pOsaFileWritPtr->openWrite(&pC->ewc.p3gpWriterContext, pOutputFile,
-        M4OSA_kFileWrite);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreateMP3OutputFile: WriteOpen returns 0x%x!", err);
-        return err;
-    }
-
-    return M4NO_ERROR;
-}
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
- * @brief   Creates and prepare the output MP3 file
- * @note    Creates the writer, Creates the output file, Adds the streams,
-           Readies the writing process
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR
-M4VSS3GPP_intCreate3GPPOutputFile( M4VSS3GPP_EncodeWriteContext *pC_ewc,
-                                  M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
-                                  M4OSA_FileWriterPointer *pOsaFileWritPtr,
-                                  M4OSA_Void *pOutputFile,
-                                  M4OSA_FileReadPointer *pOsaFileReadPtr,
-                                  M4OSA_Void *pTempFile,
-                                  M4OSA_UInt32 maxOutputFileSize )
-{
-    M4OSA_ERR err;
-    M4OSA_UInt32 uiVersion;
-    M4SYS_StreamIDValue temp;
-
-    M4OSA_TRACE3_2(
-        "M4VSS3GPP_intCreate3GPPOutputFile called with pC_ewc=0x%x, pOutputFile=0x%x",
-        pC_ewc, pOutputFile);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pC_ewc), M4ERR_PARAMETER,
-        "M4VSS3GPP_intCreate3GPPOutputFile: pC_ewc is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pOutputFile), M4ERR_PARAMETER,
-        "M4VSS3GPP_intCreate3GPPOutputFile: pOutputFile is M4OSA_NULL");
-
-    /* Set writer */
-    err =
-        M4VSS3GPP_setCurrentWriter(pC_ShellAPI, M4VIDEOEDITING_kFileType_3GPP);
-    M4ERR_CHECK_RETURN(err);
-
-    /**
-    * Create the output file */
-    err = pC_ShellAPI->pWriterGlobalFcts->pFctOpen(&pC_ewc->p3gpWriterContext,
-        pOutputFile, pOsaFileWritPtr, pTempFile, pOsaFileReadPtr);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreate3GPPOutputFile: pWriterGlobalFcts->pFctOpen returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Set the signature option of the writer */
-    err =
-        pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
-        M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : VSS    ");
-
-    if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-        != err) ) /* this option may not be implemented by some writers */
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreate3GPPOutputFile:\
-            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /*11/12/2008 CR3283 MMS use case for VideoArtist:
-    Set the max output file size option in the writer so that the output file will be
-    smaller than the given file size limitation*/
-    if( maxOutputFileSize > 0 )
-    {
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
-            pC_ewc->p3gpWriterContext,
-            M4WRITER_kMaxFileSize, &maxOutputFileSize);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                writer set option M4WRITER_kMaxFileSize returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Set the version option of the writer */
-    uiVersion =
-        (M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
-        + M4VIDEOEDITING_VERSION_REVISION);
-    err =
-        pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
-        M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
-
-    if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
-        != err) ) /* this option may not be implemented by some writers */
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreate3GPPOutputFile:\
-            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    if( M4SYS_kVideoUnknown != pC_ewc->VideoStreamType )
-    {
-        /**
-        * Set the video stream properties */
-        pC_ewc->WriterVideoStreamInfo.height = pC_ewc->uiVideoHeight;
-        pC_ewc->WriterVideoStreamInfo.width = pC_ewc->uiVideoWidth;
-        pC_ewc->WriterVideoStreamInfo.fps =
-            0.0; /**< Not used by the shell/core writer */
-        pC_ewc->WriterVideoStreamInfo.Header.pBuf =
-            pC_ewc->pVideoOutputDsi; /**< Previously computed output DSI */
-        pC_ewc->WriterVideoStreamInfo.Header.Size = pC_ewc->
-            uiVideoOutputDsiSize; /**< Previously computed output DSI size */
-
-        pC_ewc->WriterVideoStream.streamType = pC_ewc->VideoStreamType;
-
-        switch( pC_ewc->VideoStreamType )
-        {
-            case M4SYS_kMPEG_4:
-            case M4SYS_kH263:
-            case M4SYS_kH264:
-                /**< We HAVE to put a value here... */
-                pC_ewc->WriterVideoStream.averageBitrate =
-                    pC_ewc->uiVideoBitrate;
-                pC_ewc->WriterVideoStream.maxBitrate = pC_ewc->uiVideoBitrate;
-                break;
-
-            default:
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intCreate3GPPOutputFile: unknown input video format (0x%x),\
-                    returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT!",
-                    pC_ewc->VideoStreamType);
-                return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
-        }
-
-        pC_ewc->WriterVideoStream.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
-        pC_ewc->WriterVideoStream.timeScale =
-            0; /**< Not used by the shell/core writer */
-        pC_ewc->WriterVideoStream.profileLevel =
-            0; /**< Not used by the shell/core writer */
-        pC_ewc->WriterVideoStream.duration =
-            0; /**< Not used by the shell/core writer */
-
-        pC_ewc->WriterVideoStream.decoderSpecificInfoSize =
-            sizeof(M4WRITER_StreamVideoInfos);
-        pC_ewc->WriterVideoStream.decoderSpecificInfo =
-            (M4OSA_MemAddr32) &(pC_ewc->WriterVideoStreamInfo);
-
-        /**
-        * Add the video stream */
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
-            pC_ewc->p3gpWriterContext, &pC_ewc->WriterVideoStream);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Update AU properties for video stream */
-        pC_ewc->WriterVideoAU.attribute = AU_RAP;
-        pC_ewc->WriterVideoAU.CTS = 0;
-        pC_ewc->WriterVideoAU.DTS = 0;    /** Reset time */
-        pC_ewc->WriterVideoAU.frag = M4OSA_NULL;
-        pC_ewc->WriterVideoAU.nbFrag = 0; /** No fragment */
-        pC_ewc->WriterVideoAU.size = 0;
-        pC_ewc->WriterVideoAU.dataAddress = M4OSA_NULL;
-        pC_ewc->WriterVideoAU.stream = &(pC_ewc->WriterVideoStream);
-
-        /**
-        * Set the writer max video AU size */
-        pC_ewc->uiVideoMaxAuSize = (M4OSA_UInt32)(1.5F
-            *(M4OSA_Float)(pC_ewc->WriterVideoStreamInfo.width
-            * pC_ewc->WriterVideoStreamInfo.height)
-            * M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO);
-        temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
-        temp.value = pC_ewc->uiVideoMaxAuSize;
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
-            pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
-            (M4OSA_DataOption) &temp);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Set the writer max video chunk size */
-        temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
-        temp.value = (M4OSA_UInt32)(pC_ewc->uiVideoMaxAuSize \
-            * M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO); /**< from max AU size to
-                                                                  max Chunck size */
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
-            pC_ewc->p3gpWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
-            (M4OSA_DataOption) &temp);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    if( M4SYS_kAudioUnknown != pC_ewc->AudioStreamType )
-    {
-        M4WRITER_StreamAudioInfos streamAudioInfo;
-
-        streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
-        streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
-        streamAudioInfo.nbChannels = 1;      /**< unused by our shell writer */
-
-        if( pC_ewc->pAudioOutputDsi != M4OSA_NULL )
-        {
-            /* If we copy the stream from the input, we copy its DSI */
-            streamAudioInfo.Header.Size = pC_ewc->uiAudioOutputDsiSize;
-            streamAudioInfo.Header.pBuf = pC_ewc->pAudioOutputDsi;
-        }
-        else
-        {
-            /* Writer will put a default DSI */
-            streamAudioInfo.Header.Size = 0;
-            streamAudioInfo.Header.pBuf = M4OSA_NULL;
-        }
-
-        pC_ewc->WriterAudioStream.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
-        pC_ewc->WriterAudioStream.streamType = pC_ewc->AudioStreamType;
-        pC_ewc->WriterAudioStream.duration =
-            0; /**< Not used by the shell/core writer */
-        pC_ewc->WriterAudioStream.profileLevel =
-            0; /**< Not used by the shell/core writer */
-        pC_ewc->WriterAudioStreamInfo.nbSamplesPerSec =
-            pC_ewc->uiSamplingFrequency;
-        pC_ewc->WriterAudioStream.timeScale = pC_ewc->uiSamplingFrequency;
-        pC_ewc->WriterAudioStreamInfo.nbChannels =
-            (M4OSA_UInt16)pC_ewc->uiNbChannels;
-        pC_ewc->WriterAudioStreamInfo.nbBitsPerSample =
-            0; /**< Not used by the shell/core writer */
-
-        /**
-        * Add the audio stream */
-        switch( pC_ewc->AudioStreamType )
-        {
-            case M4SYS_kAMR:
-                pC_ewc->WriterAudioStream.averageBitrate =
-                    0; /**< It is not used by the shell, the DSI is taken into account instead */
-                pC_ewc->WriterAudioStream.maxBitrate =
-                    0; /**< Not used by the shell/core writer */
-                break;
-
-            case M4SYS_kAAC:
-                pC_ewc->WriterAudioStream.averageBitrate =
-                    pC_ewc->uiAudioBitrate;
-                pC_ewc->WriterAudioStream.maxBitrate = pC_ewc->uiAudioBitrate;
-                break;
-
-            case M4SYS_kEVRC:
-                pC_ewc->WriterAudioStream.averageBitrate =
-                    0; /**< It is not used by the shell, the DSI is taken into account instead */
-                pC_ewc->WriterAudioStream.maxBitrate =
-                    0; /**< Not used by the shell/core writer */
-                break;
-
-            case M4SYS_kMP3: /**< there can't be MP3 track in 3GPP file -> error */
-            default:
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intCreate3GPPOutputFile: unknown output audio format (0x%x),\
-                    returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT!",
-                    pC_ewc->AudioStreamType);
-                return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
-        }
-
-        /**
-        * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
-        in the DSI pointer... */
-        pC_ewc->WriterAudioStream.decoderSpecificInfo =
-            (M4OSA_MemAddr32) &streamAudioInfo;
-
-        /**
-        * Link the AU and the stream */
-        pC_ewc->WriterAudioAU.stream = &(pC_ewc->WriterAudioStream);
-        pC_ewc->WriterAudioAU.dataAddress = M4OSA_NULL;
-        pC_ewc->WriterAudioAU.size = 0;
-        pC_ewc->WriterAudioAU.CTS =
-            -pC_ewc->iSilenceFrameDuration; /** Reset time */
-        pC_ewc->WriterAudioAU.DTS = 0;
-        pC_ewc->WriterAudioAU.attribute = 0;
-        pC_ewc->WriterAudioAU.nbFrag = 0; /** No fragment */
-        pC_ewc->WriterAudioAU.frag = M4OSA_NULL;
-
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
-            pC_ewc->p3gpWriterContext, &pC_ewc->WriterAudioStream);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Set the writer max audio AU size */
-        pC_ewc->uiAudioMaxAuSize = M4VSS3GPP_AUDIO_MAX_AU_SIZE;
-        temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
-        temp.value = pC_ewc->uiAudioMaxAuSize;
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
-            pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
-            (M4OSA_DataOption) &temp);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, audio) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * Set the writer max audio chunck size */
-        temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
-        temp.value = M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE;
-        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
-            pC_ewc->p3gpWriterContext,
-            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
-            (M4OSA_DataOption) &temp);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreate3GPPOutputFile:\
-                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, audio) returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * All streams added, we're now ready to write */
-    err = pC_ShellAPI->pWriterGlobalFcts->pFctStartWriting(
-        pC_ewc->p3gpWriterContext);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreate3GPPOutputFile:\
-            pWriterGlobalFcts->pFctStartWriting() returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intCreate3GPPOutputFile(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR  M4VSS3GPP_intComputeOutputVideoAndAudioDsi()
- * @brief    Generate a H263 or MPEG-4 decoder specific info compatible with all input video
- *            tracks. Copy audio dsi from master clip.
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
-                                           M4OSA_UInt8 uiMasterClip )
-{
-    M4OSA_Int32 iResynchMarkerDsiIndex;
-    M4_StreamHandler *pStreamForDsi;
-    M4VSS3GPP_ClipContext *pClip;
-    M4OSA_ERR err;
-    M4OSA_UInt32 i;
-    M4DECODER_MPEG4_DecoderConfigInfo DecConfigInfo;
-    M4DECODER_VideoSize dummySize;
-    M4OSA_Bool bGetDSiFromEncoder = M4OSA_FALSE;
-
-    M4ENCODER_Header *encHeader;
-    M4SYS_StreamIDmemAddr streamHeader;
-
-    pStreamForDsi = M4OSA_NULL;
-    pClip = M4OSA_NULL;
-
-    /**
-    * H263 case */
-    if( M4SYS_kH263 == pC->ewc.VideoStreamType )
-    {
-        /**
-        * H263 output DSI is always 7 bytes */
-        pC->ewc.uiVideoOutputDsiSize = 7;
-        pC->ewc.pVideoOutputDsi =
-            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ewc.uiVideoOutputDsiSize,
-            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H263)");
-
-        if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
-                unable to allocate pVideoOutputDsi (H263), returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-
-        /**
-        * (We override the input vendor info.
-        * At least we know that nothing special will be tried with PHLP-stamped
-          edited streams...) */
-        pC->ewc.pVideoOutputDsi[0] = 'P';
-        pC->ewc.pVideoOutputDsi[1] = 'H';
-        pC->ewc.pVideoOutputDsi[2] = 'L';
-        pC->ewc.pVideoOutputDsi[3] = 'P';
-
-        /**
-        * Decoder version is 0 */
-        pC->ewc.pVideoOutputDsi[4] = 0;
-
-        /**
-        * Level is the sixth byte in the DSI */
-        pC->ewc.pVideoOutputDsi[5] = pC->xVSS.outputVideoLevel;
-
-        /**
-        * Profile is the seventh byte in the DSI*/
-        pC->ewc.pVideoOutputDsi[6] = pC->xVSS.outputVideoProfile;
-    }
-
-    /**
-    * MPEG-4 case */
-    else if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType ||
-        M4SYS_kH264 == pC->ewc.VideoStreamType) {
-
-        /* For MPEG4 and H.264 encoder case
-        * Fetch the DSI from the shell video encoder, and feed it to the writer before
-        closing it. */
-
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: get DSI for H264 stream");
-
-        if( M4OSA_NULL == pC->ewc.pEncContext )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: pC->ewc.pEncContext is NULL");
-            err = M4VSS3GPP_intCreateVideoEncoder(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                    M4VSS3GPP_intCreateVideoEncoder returned error 0x%x",
-                    err);
-            }
-        }
-
-        if( M4OSA_NULL != pC->ewc.pEncContext )
-        {
-            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
-                pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
-                (M4OSA_DataOption) &encHeader);
-
-            if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                    failed to get the encoder header (err 0x%x)",
-                    err);
-                M4OSA_TRACE1_2(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: encHeader->pBuf=0x%x, size=0x%x",
-                    encHeader->pBuf, encHeader->Size);
-            }
-            else
-            {
-                M4OSA_TRACE1_0(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                     send DSI for video stream to 3GP writer");
-
-                /**
-                * Allocate and copy the new DSI */
-                pC->ewc.pVideoOutputDsi =
-                    (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(encHeader->Size, M4VSS3GPP,
-                    (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
-
-                if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
-                {
-                    M4OSA_TRACE1_0(
-                        "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
-                         unable to allocate pVideoOutputDsi, returning M4ERR_ALLOC");
-                    return M4ERR_ALLOC;
-                }
-                pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
-                memcpy((void *)pC->ewc.pVideoOutputDsi, (void *)encHeader->pBuf,
-                    encHeader->Size);
-            }
-
-            err = M4VSS3GPP_intDestroyVideoEncoder(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                    M4VSS3GPP_intDestroyVideoEncoder returned error 0x%x",
-                    err);
-            }
-        }
-        else
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                pC->ewc.pEncContext is NULL, cannot get the DSI");
-        }
-    }
-
-    pStreamForDsi = M4OSA_NULL;
-    pClip = M4OSA_NULL;
-
-    /* Compute Audio DSI */
-    if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
-    {
-        if( uiMasterClip == 0 )
-        {
-            /* Clip is already opened */
-            pStreamForDsi = &(pC->pC1->pAudioStream->m_basicProperties);
-        }
-        else
-        {
-            /**
-            * We can use the fast open mode to get the DSI */
-            err = M4VSS3GPP_intClipInit(&pClip, pC->pOsaFileReadPtr);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                    M4VSS3GPP_intClipInit() returns 0x%x!",
-                    err);
-
-                if( pClip != M4OSA_NULL )
-                {
-                    M4VSS3GPP_intClipCleanUp(pClip);
-                }
-                return err;
-            }
-
-            err = M4VSS3GPP_intClipOpen(pClip, &pC->pClipList[uiMasterClip],
-                M4OSA_FALSE, M4OSA_TRUE, M4OSA_TRUE);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                    M4VSS3GPP_intClipOpen() returns 0x%x!",
-                    err);
-                M4VSS3GPP_intClipCleanUp(pClip);
-                return err;
-            }
-
-            pStreamForDsi = &(pClip->pAudioStream->m_basicProperties);
-        }
-
-        /**
-        * Allocate and copy the new DSI */
-        pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(
-            pStreamForDsi->m_decoderSpecificInfoSize,
-            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pAudioOutputDsi");
-
-        if( M4OSA_NULL == pC->ewc.pAudioOutputDsi )
-        {
-            M4OSA_TRACE1_0(
-                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
-                unable to allocate pAudioOutputDsi, returning M4ERR_ALLOC");
-            return M4ERR_ALLOC;
-        }
-        pC->ewc.uiAudioOutputDsiSize =
-            (M4OSA_UInt16)pStreamForDsi->m_decoderSpecificInfoSize;
-        memcpy((void *)pC->ewc.pAudioOutputDsi,
-            (void *)pStreamForDsi->m_pDecoderSpecificInfo,
-            pC->ewc.uiAudioOutputDsiSize);
-
-        /**
-        * If a clip has been temporarily opened to get its DSI, close it */
-        if( M4OSA_NULL != pClip )
-        {
-            err = M4VSS3GPP_intClipCleanUp(pClip);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
-                    M4VSS3GPP_intClipCleanUp() returns 0x%x!",
-                    err);
-                return err;
-            }
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0(
-        "M4VSS3GPP_intComputeOutputVideoAndAudioDsi(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intSwitchToNextClip()
- * @brief    Switch from the current clip to the next one
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
-    M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-
-    if( M4OSA_NULL != pC->pC1 )
-    {
-        if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
-            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame[0].pac_data) {
-                free(pC->pC1->m_pPreResizeFrame[0].pac_data);
-                pC->pC1->m_pPreResizeFrame[0].pac_data = M4OSA_NULL;
-            }
-            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame[1].pac_data) {
-                free(pC->pC1->m_pPreResizeFrame[1].pac_data);
-                pC->pC1->m_pPreResizeFrame[1].pac_data = M4OSA_NULL;
-            }
-            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame[2].pac_data) {
-                free(pC->pC1->m_pPreResizeFrame[2].pac_data);
-                pC->pC1->m_pPreResizeFrame[2].pac_data = M4OSA_NULL;
-            }
-            free(pC->pC1->m_pPreResizeFrame);
-            pC->pC1->m_pPreResizeFrame = M4OSA_NULL;
-        }
-        /**
-        * Close the current first clip */
-        err = M4VSS3GPP_intClipCleanUp(pC->pC1);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        *  increment clip counter */
-        pC->uiCurrentClip++;
-    }
-
-    /**
-    * Check if we reached the last clip */
-    if( pC->uiCurrentClip >= pC->uiClipNumber )
-    {
-        pC->pC1 = M4OSA_NULL;
-        pC->State = M4VSS3GPP_kEditState_FINISHED;
-
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intSwitchToNextClip:\
-            M4VSS3GPP_intClipClose(C1) returns M4VSS3GPP_WAR_EDITING_DONE");
-        return M4VSS3GPP_WAR_EDITING_DONE;
-    }
-
-    /**
-    * If the next clip has already be opened, set it as first clip */
-    if( M4OSA_NULL != pC->pC2 )
-    {
-        pC->pC1 = pC->pC2;
-        if(M4OSA_NULL != pC->pC2->m_pPreResizeFrame) {
-            pC->pC1->m_pPreResizeFrame = pC->pC2->m_pPreResizeFrame;
-        }
-        pC->pC2 = M4OSA_NULL;
-        pC->bClip1ActiveFramingEffect = pC->bClip2ActiveFramingEffect;
-        pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
-    }
-    /**
-    * else open it */
-    else
-    {
-        err = M4VSS3GPP_intOpenClip(pC, &pC->pC1,
-            &pC->pClipList[pC->uiCurrentClip]);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intOpenClip() returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * If the second clip has not been opened yet,
-          that means that there has been no transition.
-        * So both output video and audio times are OK.
-        * So we can set both video2 and audio offsets */
-
-        /**
-        * Add current video output CTS to the clip video offset */
-
-        // Decorrelate input and output encoding timestamp to handle encoder prefetch
-        pC->pC1->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
-        /**
-        * Add current audio output CTS to the clip audio offset */
-        pC->pC1->iAoffset +=
-            (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-        /**
-        * 2005-03-24: BugFix for audio-video synchro:
-        * There may be a portion of the duration of an audio AU of desynchro at each assembly.
-        * It leads to an audible desynchro when there are a lot of clips assembled.
-        * This bug fix allows to resynch the audio track when the delta is higher
-        * than one audio AU duration.
-        * We Step one AU in the second clip and we change the audio offset accordingly. */
-        if( ( pC->pC1->iAoffset
-            - (M4OSA_Int32)(pC->pC1->iVoffset *pC->pC1->scale_audio + 0.5))
-        > pC->ewc.iSilenceFrameDuration )
-        {
-            /**
-            * Advance one AMR frame */
-            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
-
-            if( M4OSA_ERR_IS_ERROR(err) )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intSwitchToNextClip:\
-                    M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
-                    err);
-                return err;
-            }
-            /**
-            * Update audio offset accordingly*/
-            pC->pC1->iAoffset -= pC->ewc.iSilenceFrameDuration;
-        }
-    }
-
-    /**
-    * Init starting state for this clip processing */
-    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
-    {
-        /**
-        * In the MP3 case we use a special audio state */
-        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
-    }
-    else
-    {
-        /**
-        * We start with the video processing */
-        pC->State = M4VSS3GPP_kEditState_VIDEO;
-
-        if( pC->Vstate != M4VSS3GPP_kEditVideoState_TRANSITION )
-        {
-            /* if not a transition then reset previous video state */
-            pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
-        }
-    }
-    /* The flags are set to false at the beginning of every clip */
-    pC->m_bClipExternalHasStarted = M4OSA_FALSE;
-    pC->bEncodeTillEoF = M4OSA_FALSE;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intSwitchToNextClip(): returning M4NO_ERROR");
-    /* RC: to know when a file has been processed */
-    return M4VSS3GPP_WAR_SWITCH_CLIP;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
- * @brief    Do what to do when the end of a clip video track is reached
- * @note    If there is audio on the current clip, process it, else switch to the next clip
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-
-    /**
-    * Video is done for this clip, now we do the audio */
-    if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
-    {
-        pC->State = M4VSS3GPP_kEditState_AUDIO;
-    }
-    else
-    {
-        /**
-        * Clip done, do the next one */
-        err = M4VSS3GPP_intSwitchToNextClip(pC);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intReachedEndOfVideo: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfVideo(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
- * @brief    Do what to do when the end of a clip audio track is reached
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-
-    /**
-    * Clip done, do the next one */
-    err = M4VSS3GPP_intSwitchToNextClip(pC);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intReachedEndOfAudio: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Start with the video */
-    if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
-    {
-        pC->State = M4VSS3GPP_kEditState_VIDEO;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfAudio(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intOpenClip()
- * @brief    Open next clip
- * @param   pC            (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intOpenClip( M4VSS3GPP_InternalEditContext *pC,
-                                M4VSS3GPP_ClipContext ** hClip,
-                                M4VSS3GPP_ClipSettings *pClipSettings )
-{
-    M4OSA_ERR err;
-    M4VSS3GPP_ClipContext *pClip; /**< shortcut */
-    M4VIDEOEDITING_ClipProperties *pClipProperties = M4OSA_NULL;
-    M4OSA_Int32 iCts;
-    M4OSA_UInt32 i;
-
-    M4OSA_TRACE2_1("M4VSS3GPP_intOpenClip: \"%s\"",
-        (M4OSA_Char *)pClipSettings->pFile);
-
-    err = M4VSS3GPP_intClipInit(hClip, pC->pOsaFileReadPtr);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipInit() returns 0x%x!",
-            err);
-
-        if( *hClip != M4OSA_NULL )
-        {
-            M4VSS3GPP_intClipCleanUp(*hClip);
-        }
-        return err;
-    }
-
-    /**
-    * Set shortcut */
-    pClip = *hClip;
-
-    if (pClipSettings->FileType == M4VIDEOEDITING_kFileType_ARGB8888 ) {
-        pClipProperties = &pClipSettings->ClipProperties;
-        pClip->pSettings = pClipSettings;
-        pClip->iEndTime = pClipSettings->uiEndCutTime;
-    }
-
-    err = M4VSS3GPP_intClipOpen(pClip, pClipSettings,
-              M4OSA_FALSE, M4OSA_FALSE, M4OSA_FALSE);
-    if (M4NO_ERROR != err) {
-        M4OSA_TRACE1_1("M4VSS3GPP_intOpenClip: \
-            M4VSS3GPP_intClipOpen() returns 0x%x!", err);
-        M4VSS3GPP_intClipCleanUp(pClip);
-        *hClip = M4OSA_NULL;
-        return err;
-    }
-
-    if (pClipSettings->FileType != M4VIDEOEDITING_kFileType_ARGB8888 ) {
-        pClipProperties = &pClip->pSettings->ClipProperties;
-    }
-
-    /**
-    * Copy common 'silence frame stuff' to ClipContext */
-    pClip->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
-    pClip->pSilenceFrameData = pC->ewc.pSilenceFrameData;
-    pClip->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
-    pClip->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
-    pClip->scale_audio = pC->ewc.scale_audio;
-
-    pClip->iAudioFrameCts = -pClip->iSilenceFrameDuration; /* Reset time */
-
-    /**
-    * If the audio track is not compatible with the output audio format,
-    * we remove it. So it will be replaced by silence */
-    if( M4OSA_FALSE == pClipProperties->bAudioIsCompatibleWithMasterClip )
-    {
-        M4VSS3GPP_intClipDeleteAudioTrack(pClip);
-    }
-
-    /**
-    * Actual begin cut */
-    if( 0 == pClipSettings->uiBeginCutTime )
-    {
-        pClip->iVoffset = 0;
-        pClip->iAoffset = 0;
-        pClip->iActualVideoBeginCut = 0;
-        pClip->iActualAudioBeginCut = 0;
-    }
-    else if(pClipSettings->FileType != M4VIDEOEDITING_kFileType_ARGB8888) {
-        if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
-        {
-            /**
-            * Jump the video to the target begin cut to get the actual begin cut value */
-            pClip->iActualVideoBeginCut =
-                (M4OSA_Int32)pClipSettings->uiBeginCutTime;
-            iCts = pClip->iActualVideoBeginCut;
-
-            err = pClip->ShellAPI.m_pReader->m_pFctJump(pClip->pReaderContext,
-                (M4_StreamHandler *)pClip->pVideoStream, &iCts);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intOpenClip: m_pFctJump(V) returns 0x%x!", err);
-                return err;
-            }
-
-            /**
-            * Update clip offset with the video begin cut */
-            pClip->iVoffset = -pClip->iActualVideoBeginCut;
-        }
-
-        if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
-        {
-            /**
-            * Jump the audio to the video actual begin cut */
-            if( M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType )
-            {
-                pClip->iActualAudioBeginCut = pClip->iActualVideoBeginCut;
-                iCts = (M4OSA_Int32)(pClip->iActualAudioBeginCut
-                    * pClip->scale_audio + 0.5);
-
-                err = M4VSS3GPP_intClipJumpAudioAt(pClip, &iCts);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
-                        err);
-                    return err;
-                }
-                /**
-                * Update clip offset with the audio begin cut */
-                pClip->iAoffset = -iCts;
-            }
-            else
-            {
-                /**
-                * For the MP3, the jump is not done because of the VBR,
-                  it could be not enough accurate */
-                pClip->iActualAudioBeginCut =
-                    (M4OSA_Int32)pClipSettings->uiBeginCutTime;
-            }
-        }
-    }
-
-    if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
-    {
-        if ((pClipSettings->FileType != M4VIDEOEDITING_kFileType_ARGB8888 )) {
-
-            /**
-            * Read the first Video AU of the clip */
-            err = pClip->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-                pClip->pReaderContext,
-                (M4_StreamHandler *)pClip->pVideoStream, &pClip->VideoAU);
-
-            if( M4WAR_NO_MORE_AU == err )
-            {
-                /**
-                * If we (already!) reach the end of the clip, we filter the error.
-                * It will be correctly managed at the first step. */
-                err = M4NO_ERROR;
-            }
-            else if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1("M4VSS3GPP_intOpenClip: \
-                    m_pReaderDataIt->m_pFctGetNextAu() returns 0x%x!", err);
-                return err;
-            }
-        } else {
-            pClipProperties->uiVideoWidth  = pClipProperties->uiStillPicWidth;
-            pClipProperties->uiVideoHeight = pClipProperties->uiStillPicHeight;
-        }
-        /* state check not to allocate buffer during save start */
-
-
-        /******************************/
-        /* Video resize management   */
-        /******************************/
-        /**
-        * If the input clip is a rotate video or the output resolution is different
-        * from the input resolution, then the video frame needs to be rotated
-        * or resized, force to resize mode */
-        if (((M4OSA_UInt32)pC->ewc.uiVideoWidth !=
-                 pClipProperties->uiVideoWidth) ||
-            ((M4OSA_UInt32)pC->ewc.uiVideoHeight !=
-                 pClipProperties->uiVideoHeight) ||
-            pClipProperties->videoRotationDegrees != 0) {
-
-            if (pClip->m_pPreResizeFrame == M4OSA_NULL) {
-                /**
-                * Allocate the intermediate video plane that will
-                  receive the decoded image before resizing */
-                pClip->m_pPreResizeFrame =
-                 (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
-                     3*sizeof(M4VIFI_ImagePlane), M4VSS3GPP,
-                     (M4OSA_Char *)"pPreResizeFrame");
-                if (M4OSA_NULL == pClip->m_pPreResizeFrame) {
-                    M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
-                        unable to allocate m_pPreResizeFrame");
-                    return M4ERR_ALLOC;
-                }
-
-                pClip->m_pPreResizeFrame[0].pac_data = M4OSA_NULL;
-                pClip->m_pPreResizeFrame[1].pac_data = M4OSA_NULL;
-                pClip->m_pPreResizeFrame[2].pac_data = M4OSA_NULL;
-
-                /**
-                * Allocate the Y plane */
-                pClip->m_pPreResizeFrame[0].u_topleft = 0;
-                pClip->m_pPreResizeFrame[0].u_width  =
-                    pClipProperties->uiVideoWidth;
-                pClip->m_pPreResizeFrame[0].u_height =
-                    pClipProperties->uiVideoHeight;
-                pClip->m_pPreResizeFrame[0].u_stride =
-                    pClip->m_pPreResizeFrame[0].u_width;
-
-                pClip->m_pPreResizeFrame[0].pac_data =
-                 (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc (
-                   pClip->m_pPreResizeFrame[0].u_stride * pClip->m_pPreResizeFrame[0].u_height,
-                   M4MCS, (M4OSA_Char *)"m_pPreResizeFrame[0].pac_data");
-                if (M4OSA_NULL == pClip->m_pPreResizeFrame[0].pac_data) {
-                    M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
-                        unable to allocate m_pPreResizeFrame[0].pac_data");
-                    free(pClip->m_pPreResizeFrame);
-                    return M4ERR_ALLOC;
-                }
-
-                /**
-                * Allocate the U plane */
-                pClip->m_pPreResizeFrame[1].u_topleft = 0;
-                pClip->m_pPreResizeFrame[1].u_width  =
-                    pClip->m_pPreResizeFrame[0].u_width >> 1;
-                pClip->m_pPreResizeFrame[1].u_height =
-                    pClip->m_pPreResizeFrame[0].u_height >> 1;
-                pClip->m_pPreResizeFrame[1].u_stride =
-                    pClip->m_pPreResizeFrame[1].u_width;
-
-                pClip->m_pPreResizeFrame[1].pac_data =
-                 (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc (
-                   pClip->m_pPreResizeFrame[1].u_stride * pClip->m_pPreResizeFrame[1].u_height,
-                   M4MCS, (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
-                if (M4OSA_NULL == pClip->m_pPreResizeFrame[1].pac_data) {
-                    M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
-                        unable to allocate m_pPreResizeFrame[1].pac_data");
-                    free(pClip->m_pPreResizeFrame[0].pac_data);
-                    free(pClip->m_pPreResizeFrame);
-                    return M4ERR_ALLOC;
-                }
-
-                /**
-                * Allocate the V plane */
-                pClip->m_pPreResizeFrame[2].u_topleft = 0;
-                pClip->m_pPreResizeFrame[2].u_width =
-                    pClip->m_pPreResizeFrame[1].u_width;
-                pClip->m_pPreResizeFrame[2].u_height =
-                    pClip->m_pPreResizeFrame[1].u_height;
-                pClip->m_pPreResizeFrame[2].u_stride =
-                    pClip->m_pPreResizeFrame[2].u_width;
-
-                pClip->m_pPreResizeFrame[2].pac_data =
-                 (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc (
-                   pClip->m_pPreResizeFrame[2].u_stride * pClip->m_pPreResizeFrame[2].u_height,
-                   M4MCS, (M4OSA_Char *)"m_pPreResizeFrame[2].pac_data");
-                if (M4OSA_NULL == pClip->m_pPreResizeFrame[2].pac_data) {
-                    M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
-                        unable to allocate m_pPreResizeFrame[2].pac_data");
-                    free(pClip->m_pPreResizeFrame[0].pac_data);
-                    free(pClip->m_pPreResizeFrame[1].pac_data);
-                    free(pClip->m_pPreResizeFrame);
-                    return M4ERR_ALLOC;
-                }
-            }
-        }
-
-        /**
-        * The video is currently in reading mode */
-        pClip->Vstatus = M4VSS3GPP_kClipStatus_READ;
-    }
-
-    if( ( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType)
-        && (M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType) )
-    {
-        /**
-        * Read the first Audio AU of the clip */
-        err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
-
-        if( M4OSA_ERR_IS_ERROR(err) )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
-                err);
-            return err;
-        }
-
-        /**
-        * The audio is currently in reading mode */
-        pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intOpenClip(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR  M4VSS3GPP_intComputeOutputAverageVideoBitrate()
- * @brief    Average bitrate of the output file, computed from input bitrates,
- *          durations, transitions and cuts.
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
-    M4VSS3GPP_InternalEditContext *pC )
-{
-    M4VSS3GPP_ClipSettings *pCS_0, *pCS_1, *pCS_2;
-    M4VSS3GPP_TransitionSettings *pT0, *pT2;
-    M4OSA_Int32 i;
-
-    M4OSA_UInt32 t0_duration, t2_duration;
-    M4OSA_UInt32 t0_bitrate, t2_bitrate;
-    M4OSA_UInt32 c1_duration;
-
-    M4OSA_UInt32 total_duration;
-    M4OSA_UInt32 total_bitsum;
-
-    total_duration = 0;
-    total_bitsum = 0;
-
-    /* Loop on the number of clips */
-    for ( i = 0; i < pC->uiClipNumber; i++ )
-    {
-        pCS_1 = &pC->pClipList[i];
-
-        t0_duration = 0;
-        t0_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
-        t2_duration = 0;
-        t2_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
-
-        /* Transition with the previous clip */
-        if( i > 0 )
-        {
-            pCS_0 = &pC->pClipList[i - 1];
-            pT0 = &pC->pTransitionList[i - 1];
-
-            if( pT0->VideoTransitionType
-                != M4VSS3GPP_kVideoTransitionType_None )
-            {
-                t0_duration = pT0->uiTransitionDuration;
-
-                if( pCS_0->ClipProperties.uiVideoBitrate > t0_bitrate )
-                {
-                    t0_bitrate = pCS_0->ClipProperties.uiVideoBitrate;
-                }
-            }
-        }
-
-        /* Transition with the next clip */
-        if( i < pC->uiClipNumber - 1 )
-        {
-            pCS_2 = &pC->pClipList[i + 1];
-            pT2 = &pC->pTransitionList[i];
-
-            if( pT2->VideoTransitionType
-                != M4VSS3GPP_kVideoTransitionType_None )
-            {
-                t2_duration = pT2->uiTransitionDuration;
-
-                if( pCS_2->ClipProperties.uiVideoBitrate > t2_bitrate )
-                {
-                    t2_bitrate = pCS_2->ClipProperties.uiVideoBitrate;
-                }
-            }
-        }
-
-        /* Check for cut times */
-        if( pCS_1->uiEndCutTime > 0 )
-            c1_duration = pCS_1->uiEndCutTime;
-        else
-            c1_duration = pCS_1->ClipProperties.uiClipVideoDuration;
-
-        if( pCS_1->uiBeginCutTime > 0 )
-            c1_duration -= pCS_1->uiBeginCutTime;
-
-        c1_duration -= t0_duration + t2_duration;
-
-        /* Compute bitsum and duration */
-        total_duration += c1_duration + t0_duration / 2 + t2_duration / 2;
-
-        total_bitsum +=
-            c1_duration * (pCS_1->ClipProperties.uiVideoBitrate / 1000)
-            + (t0_bitrate / 1000) * t0_duration / 2
-            + (t2_bitrate / 1000) * t2_duration / 2;
-    }
-
-    pC->ewc.uiVideoBitrate = ( total_bitsum / total_duration) * 1000;
-}
-
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
deleted file mode 100755
index 746883d..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
+++ /dev/null
@@ -1,2013 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_EditAudio.c
- * @brief    Video Studio Service 3GPP edit API implementation.
- * @note
- ******************************************************************************
- */
-
-/****************/
-/*** Includes ***/
-/****************/
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- * Our header */
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_InternalTypes.h"
-#include "M4VSS3GPP_InternalFunctions.h"
-#include "M4VSS3GPP_InternalConfig.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-
-/**
- * OSAL headers */
-#include "M4OSA_Memory.h" /**< OSAL memory management */
-#include "M4OSA_Debug.h"  /**< OSAL debug management */
-
-#define PWR_FXP_FRACT_MAX            (32768)
-
-/************************************************************************/
-/* Static local functions                                               */
-/************************************************************************/
-static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
-                                             *pC );
-static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
-                                                 *pC, M4OSA_UInt8 uiClipNumber );
-static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
-                                               *pC, M4OSA_UInt8 uiClip1orClip2,
-                                               M4OSA_Int16 *pPCMdata,
-                                               M4OSA_UInt32 uiPCMsize );
-static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
-                                              *pC, M4OSA_Int16 *pPCMdata1,
-                                              M4OSA_Int16 *pPCMdata2,
-                                              M4OSA_UInt32 uiPCMsize );
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
- * @brief    One step of jumping processing for the MP3 clip.
- * @note    On one step, the jump of several AU is done
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intEditJumpMP3( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-    M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
-    M4OSA_Int32 JumpCts;
-
-    JumpCts = pClip->iActualAudioBeginCut;
-
-    err = M4VSS3GPP_intClipJumpAudioAt(pClip, &JumpCts);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    if( JumpCts >= pClip->iActualAudioBeginCut )
-    {
-        pC->State = M4VSS3GPP_kEditState_MP3;
-
-        /**
-        * Update clip offset with the audio begin cut */
-        pClip->iAoffset = -JumpCts;
-
-        /**
-        * The audio is currently in reading mode */
-        pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
-    }
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
- * @brief    One step of audio processing for the MP3 clip
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intEditStepMP3( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-    M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
-
-    /**
-    * Copy the input AU to the output AU */
-    err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
-        pClip->pAudioFramePtr, (M4OSA_UInt32)pClip->uiAudioFrameSize);
-
-    /**
-    * Read the next audio frame */
-    err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
-
-    if( M4OSA_ERR_IS_ERROR(err) )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
-            M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",    err);
-        return err;
-    }
-    else
-    {
-        /**
-        * Update current time (to=tc+T) */
-        pC->ewc.dATo =
-            ( pClip->iAudioFrameCts + pClip->iAoffset) / pClip->scale_audio;
-
-        if( (M4OSA_Int32)(pClip->iAudioFrameCts / pClip->scale_audio + 0.5)
-            >= pClip->iEndTime )
-        {
-            M4READER_Buffer mp3tagBuffer;
-
-            /**
-            * The duration is better respected if the first AU and last AU are both above
-            the cut time */
-            err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
-                pClip->pAudioFramePtr,
-                (M4OSA_UInt32)pClip->uiAudioFrameSize);
-
-            /* The ID3v1 tag is always at the end of the mp3 file so the end of the cutting
-            process is waited */
-            /* before writing the metadata in the output file*/
-
-            /* Retrieve the data of the ID3v1 Tag */
-            err = pClip->ShellAPI.m_pReader->m_pFctGetOption(
-                pClip->pReaderContext, M4READER_kOptionID_Mp3Id3v1Tag,
-                (M4OSA_DataOption) &mp3tagBuffer);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intEditStepMP3: M4MP3R_getOption returns 0x%x",
-                    err);
-                return err;
-            }
-
-            /* Write the data of the ID3v1 Tag in the output file */
-            if( 0 != mp3tagBuffer.m_uiBufferSize )
-            {
-                err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
-                    (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
-                /**
-                * Free before the error checking anyway */
-                free(mp3tagBuffer.m_pData);
-
-                /**
-                * Error checking */
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepMP3:\
-                        pOsaFileWritPtr->writeData(ID3v1Tag) returns 0x%x",    err);
-                    return err;
-                }
-
-                mp3tagBuffer.m_uiBufferSize = 0;
-                mp3tagBuffer.m_pData = M4OSA_NULL;
-            }
-
-            /* The End Cut has been reached */
-            err = M4VSS3GPP_intReachedEndOfAudio(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intEditStepMP3 : M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-
-        if( ( M4WAR_NO_MORE_AU == err) && (M4OSA_FALSE
-            == pC->bSupportSilence) ) /**< Reached end of clip */
-        {
-            err = M4VSS3GPP_intReachedEndOfAudio(
-                pC); /**< Clip done, do the next one */
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
-                    M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
-                    err);
-                return err;
-            }
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepMP3: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
- * @brief    One step of audio processing
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intEditStepAudio( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-    int32_t auTimeStamp = -1;
-
-    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
-    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
-    M4OSA_Time
-        frameTimeDelta; /**< Duration of the encoded (then written) data */
-    M4OSA_Bool bStopAudio;
-
-    /**
-    * Check if we reached end cut */
-    if( ( pC->ewc.dATo - pC->pC1->iAoffset / pC->pC1->scale_audio + 0.5)
-        >= pC->pC1->iEndTime )
-    {
-        /**
-        * Audio is done for this clip */
-        err = M4VSS3GPP_intReachedEndOfAudio(pC);
-
-        /* RC: to know when a file has been processed */
-        if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
-                err);
-        }
-
-        return err;
-    }
-
-    /**
-    * Check Audio Mode, depending on the current output CTS */
-    err = M4VSS3GPP_intCheckAudioMode(
-        pC); /**< This function change the pC->Astate variable! */
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intCheckAudioMode returns 0x%x!",
-            err);
-        return err;
-    }
-
-    M4OSA_TRACE2_3("  AUDIO step : dATo = %f  state = %d  offset = %ld",
-        pC->ewc.dATo, pC->Astate, pC->pC1->iAoffset);
-
-    bStopAudio = M4OSA_FALSE;
-
-    switch( pC->Astate )
-    {
-            /* _________________ */
-            /*|                 |*/
-            /*| READ_WRITE MODE |*/
-            /*|_________________|*/
-
-        case M4VSS3GPP_kEditAudioState_READ_WRITE:
-            {
-                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio READ_WRITE");
-
-                /**
-                * Get the output AU to write into */
-                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
-                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                    &pC->ewc.WriterAudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio:\
-                        READ_WRITE: pWriterDataFcts->pStartAU returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Compute output audio CTS */
-                pC->ewc.WriterAudioAU.CTS =
-                    pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
-
-                /**
-                * BZZZ bug fix (read-write case):
-                * Replace the first AMR AU of the stream with a silence AU.
-                * It removes annoying "BZZZ" audio glitch.
-                * It is not needed if there is a begin cut.
-                * It is not needed for the first clip.
-                * Because of another bugfix (2005-03-24), the first AU written may be
-                * the second one which CTS is 20. Hence the cts<21 test.
-                * (the BZZZ effect occurs even with the second AU!) */
-                if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
-                    && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
-                    < (pC->ewc.iSilenceFrameDuration + 1)) )
-                {
-                    /**
-                    * Copy a silence AU to the output */
-                    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
-                    memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
-                        (void *)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
-                    M4OSA_TRACE2_0("A #### silence AU");
-                }
-                else if( (M4OSA_UInt32)pC->pC1->uiAudioFrameSize
-                    < pC->ewc.uiAudioMaxAuSize )
-                {
-                    /**
-                    * Copy the input AU to the output AU */
-                    pC->ewc.WriterAudioAU.size =
-                        (M4OSA_UInt32)pC->pC1->uiAudioFrameSize;
-                    memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
-                        (void *)pC->pC1->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
-                }
-                else
-                {
-                    M4OSA_TRACE1_2(
-                        "M4VSS3GPP_intEditStepAudio: READ_WRITE: AU size greater than MaxAuSize \
-                        (%d>%d)! returning M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE",
-                        pC->pC1->uiAudioFrameSize, pC->ewc.uiAudioMaxAuSize);
-                    return M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE;
-                }
-
-                /**
-                * This boolean is only used to fix the BZZ bug... */
-                pC->pC1->bFirstAuWritten = M4OSA_TRUE;
-
-                M4OSA_TRACE2_2("B ---- write : cts  = %ld [ 0x%x ]",
-                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-                    pC->ewc.WriterAudioAU.size);
-
-                /**
-                * Write the AU */
-                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
-                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                    &pC->ewc.WriterAudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    /*11/12/2008 CR 3283 MMS use case for VideoArtist
-                    the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output file
-                    size is reached
-                    The editing is then finished,
-                     the warning M4VSS3GPP_WAR_EDITING_DONE is returned*/
-                    if( M4WAR_WRITER_STOP_REQ == err )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
-                        return M4VSS3GPP_WAR_EDITING_DONE;
-                    }
-                    else
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio:\
-                            READ_WRITE: pWriterDataFcts->pProcessAU returns 0x%x!",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Audio is now in read mode (there may be a "if(status!=READ)" here,
-                but it is removed for optimization) */
-                pC->pC1->Astatus = M4VSS3GPP_kClipStatus_READ;
-
-                /**
-                * Read the next audio frame */
-                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
-
-                M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                    pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
-                    pC->pC1->iAoffset / pC->pC1->scale_audio,
-                    pC->pC1->uiAudioFrameSize);
-
-                if( M4OSA_ERR_IS_ERROR(err) )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: READ_WRITE:\
-                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
-                        err);
-                    return err;
-                }
-                else
-                {
-                    /**
-                    * Update current time (to=tc+T) */
-                    pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
-                        / pC->pC1->scale_audio;
-
-                    if( ( M4WAR_NO_MORE_AU == err)
-                        && (M4OSA_FALSE == pC->bSupportSilence) )
-                    {
-                        /**
-                        * If output is other than AMR or AAC
-                        (i.e. EVRC,we can't write silence into it)
-                        * So we simply end here.*/
-                        bStopAudio = M4OSA_TRUE;
-                    }
-                }
-            }
-            break;
-
-            /* ____________________ */
-            /*|                    |*/
-            /*| DECODE_ENCODE MODE |*/
-            /*|____________________|*/
-
-        case M4VSS3GPP_kEditAudioState_DECODE_ENCODE:
-            {
-                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio DECODE_ENCODE");
-
-                /**
-                * Get the output AU to write into */
-                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
-                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                    &pC->ewc.WriterAudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
-                        pWriterDataFcts->pStartAU returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /**
-                * If we were reading the clip, we must jump a few AU backward to decode/encode
-                (without writing result) from that point. */
-                if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
-                {
-                    M4OSA_Int32 iTargetCts, iCurrentCts;
-
-                    if( 0
-                        != pC->pC1->
-                        iAudioFrameCts ) /**<don't try to pre-decode if clip is at its beginning. */
-                    {
-                        /**
-                        * Jump a few AUs backward */
-                        iCurrentCts = pC->pC1->iAudioFrameCts;
-                        iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
-                            * pC->ewc.iSilenceFrameDuration;
-
-                        if( iTargetCts < 0 )
-                        {
-                            iTargetCts = 0; /**< Sanity check */
-                        }
-
-                        err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
-
-                        if( M4NO_ERROR != err )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
-                                M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
-                                err);
-                            return err;
-                        }
-
-                        err = M4VSS3GPP_intClipReadNextAudioFrame(
-                            pC->pC1); /**< read AU where we jumped */
-
-                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                            pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
-                            pC->pC1->iAoffset / pC->pC1->scale_audio,
-                            pC->pC1->uiAudioFrameSize);
-
-                        if( M4OSA_ERR_IS_ERROR(err) )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
-                                M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
-                                err);
-                            return err;
-                        }
-
-                        /**
-                        * Decode/encode up to the wanted position */
-                        while( pC->pC1->iAudioFrameCts < iCurrentCts )
-                        {
-                            err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
-
-                            if( M4NO_ERROR != err )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch: \
-                                    M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
-                                    err);
-                                return err;
-                            }
-
-                            /* [Mono] or [Stereo interleaved] : all is in one buffer */
-                            pEncInBuffer.pTableBuffer[0] =
-                                pC->pC1->AudioDecBufferOut.m_dataAddress;
-                            pEncInBuffer.pTableBufferSize[0] =
-                                pC->pC1->AudioDecBufferOut.m_bufferSize;
-                            pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-                            pEncInBuffer.pTableBufferSize[1] = 0;
-
-                            /* Time in ms from data size, because it is PCM16 samples */
-                            frameTimeDelta =
-                                pEncInBuffer.pTableBufferSize[0] / sizeof(short)
-                                / pC->ewc.uiNbChannels;
-
-                            /**
-                            * Prepare output buffer */
-                            pEncOutBuffer.pTableBuffer[0] =
-                                (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-                            pEncOutBuffer.pTableBufferSize[0] = 0;
-
-                            M4OSA_TRACE2_0("E **** pre-encode");
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-                            /*OMX Audio decoder used.
-                            * OMX Audio dec shell does internal buffering and hence does not return
-                            a PCM buffer for every decodeStep call.*
-                            * So PCM buffer sizes might be 0. In this case donot call encode Step*/
-
-                            if( 0 != pEncInBuffer.pTableBufferSize[0] )
-                            {
-#endif
-                                /**
-                                * Encode the PCM audio */
-
-                                err =
-                                    pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
-                                    pC->ewc.pAudioEncCtxt,
-                                    &pEncInBuffer, &pEncOutBuffer);
-
-                                if( ( M4NO_ERROR != err)
-                                    && (M4WAR_NO_MORE_AU != err) )
-                                {
-                                    M4OSA_TRACE1_1(
-                                        "M4VSS3GPP_intEditStepAudio():\
-                                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                                        err);
-                                    return err;
-                                }
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                            } //if(0 != pEncInBuffer.pTableBufferSize[0])
-
-#endif
-                            pC->pC1->pAudioFramePtr = M4OSA_NULL;
-
-                            // Get timestamp of last read AU
-                            pC->pC1->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
-                             pC->pC1->pAudioDecCtxt, M4AD_kOptionID_AuCTS,
-                             (M4OSA_DataOption) &auTimeStamp);
-
-                            if (auTimeStamp == -1) {
-                                M4OSA_TRACE1_0("M4VSS3GPP_intEditStepAudio: \
-                                 invalid audio timestamp returned");
-                                return M4WAR_INVALID_TIME;
-                            }
-
-                            pC->pC1->iAudioFrameCts = auTimeStamp;
-
-                        }
-                    }
-
-                    /**
-                    * Audio is now OK for decoding */
-                    pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
-                }
-
-                /**
-                * Decode the input audio */
-                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
-                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                pC->pC1->pAudioFramePtr = M4OSA_NULL;
-
-                // Get timestamp of last read AU
-                pC->pC1->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
-                 pC->pC1->pAudioDecCtxt, M4AD_kOptionID_AuCTS,
-                 (M4OSA_DataOption) &auTimeStamp);
-
-                if (auTimeStamp == -1) {
-                    M4OSA_TRACE1_0("M4VSS3GPP_intEditStepAudio: invalid audio \
-                     timestamp returned");
-                    return M4WAR_INVALID_TIME;
-                }
-
-                pC->pC1->iAudioFrameCts = auTimeStamp;
-
-                /**
-                * Apply the effect */
-                if( pC->iClip1ActiveEffect >= 0 )
-                {
-                    err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
-                        *)pC->pC1->AudioDecBufferOut.m_dataAddress,
-                        pC->pC1->AudioDecBufferOut.m_bufferSize);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
-                            M4VSS3GPP_intEndAudioEffect returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Compute output audio CTS */
-                pC->ewc.WriterAudioAU.CTS =
-                    pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
-
-                /* May happen with corrupted input files (which have stts entries not
-                multiple of SilenceFrameDuration) */
-                if( pC->ewc.WriterAudioAU.CTS < 0 )
-                {
-                    pC->ewc.WriterAudioAU.CTS = 0;
-                }
-
-                /**
-                * BZZZ bug fix (decode-encode case):
-                * (Yes, the Bzz bug may also occur when we re-encode. It doesn't
-                *  occur at the decode before the encode, but at the playback!)
-                * Replace the first AMR AU of the encoded stream with a silence AU.
-                * It removes annoying "BZZZ" audio glitch.
-                * It is not needed if there is a begin cut.
-                * It is not needed for the first clip.
-                * Because of another bugfix (2005-03-24), the first AU written may be
-                * the second one which CTS is 20. Hence the cts<21 test.
-                * (the BZZZ effect occurs even with the second AU!) */
-                if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
-                    && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
-                    < (pC->ewc.iSilenceFrameDuration + 1)) )
-                {
-                    /**
-                    * Copy a silence AMR AU to the output */
-                    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
-                    memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
-                        (void *)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
-                    M4OSA_TRACE2_0("G #### silence AU");
-                }
-                else
-                {
-                    /**
-                    * Encode the filtered PCM audio directly into the output AU */
-
-                    /* [Mono] or [Stereo interleaved] : all is in one buffer */
-                    pEncInBuffer.pTableBuffer[0] =
-                        pC->pC1->AudioDecBufferOut.m_dataAddress;
-                    pEncInBuffer.pTableBufferSize[0] =
-                        pC->pC1->AudioDecBufferOut.m_bufferSize;
-                    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-                    pEncInBuffer.pTableBufferSize[1] = 0;
-
-                    /* Time in ms from data size, because it is PCM16 samples */
-                    frameTimeDelta =
-                        pEncInBuffer.pTableBufferSize[0] / sizeof(short)
-                        / pC->ewc.uiNbChannels;
-
-                    /**
-                    * Prepare output buffer */
-                    pEncOutBuffer.pTableBuffer[0] =
-                        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-                    pEncOutBuffer.pTableBufferSize[0] = 0;
-
-                    M4OSA_TRACE2_0("H ++++ encode AU");
-
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-                    /*OMX Audio decoder used.
-                    * OMX Audio dec shell does internal buffering and hence does not return
-                    a PCM buffer for every decodeStep call.*
-                    * So PCM buffer sizes might be 0. In this case donot call encode Step*/
-
-                    if( 0 != pEncInBuffer.pTableBufferSize[0] )
-                    {
-
-#endif
-
-                        /**
-                        * Encode the PCM audio */
-
-                        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
-                            pC->ewc.pAudioEncCtxt,
-                            &pEncInBuffer, &pEncOutBuffer);
-
-                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio():\
-                                pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                                err);
-                            return err;
-                        }
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                    }
-
-#endif
-
-                    /**
-                    * Set AU size */
-
-                    pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
-                        0]; /**< Get the size of encoded data */
-                }
-
-                /**
-                * This boolean is only used to fix the BZZ bug... */
-                pC->pC1->bFirstAuWritten = M4OSA_TRUE;
-
-                M4OSA_TRACE2_2("I ---- write : cts  = %ld [ 0x%x ]",
-                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-                    pC->ewc.WriterAudioAU.size);
-
-                /**
-                * Write the AU */
-                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
-                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                    &pC->ewc.WriterAudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    /*11/12/2008 CR 3283 MMS use case for VideoArtist
-                    the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output file
-                     size is reached
-                    The editing is then finished,
-                     the warning M4VSS3GPP_WAR_EDITING_DONE is returned*/
-                    if( M4WAR_WRITER_STOP_REQ == err )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
-                        return M4VSS3GPP_WAR_EDITING_DONE;
-                    }
-                    else
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
-                            pWriterDataFcts->pProcessAU returns 0x%x!",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Read the next audio frame */
-                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
-
-                M4OSA_TRACE2_3("J .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                    pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
-                    pC->pC1->iAoffset / pC->pC1->scale_audio,
-                    pC->pC1->uiAudioFrameSize);
-
-                if( M4OSA_ERR_IS_ERROR(err) )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
-                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
-                        err);
-                    return err;
-                }
-                else
-                {
-                    /**
-                    * Update current time (to=tc+T) */
-                    pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
-                        / pC->pC1->scale_audio;
-
-                    if( ( M4WAR_NO_MORE_AU == err)
-                        && (M4OSA_FALSE == pC->bSupportSilence) )
-                    {
-                        /**
-                        * If output is other than AMR or AAC
-                        (i.e. EVRC,we can't write silence into it)
-                        * So we simply end here.*/
-                        bStopAudio = M4OSA_TRUE;
-                    }
-                }
-            }
-            break;
-
-            /* _________________ */
-            /*|                 |*/
-            /*| TRANSITION MODE |*/
-            /*|_________________|*/
-
-        case M4VSS3GPP_kEditAudioState_TRANSITION:
-            {
-                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio TRANSITION");
-
-                /**
-                * Get the output AU to write into */
-                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
-                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                    &pC->ewc.WriterAudioAU);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                        pWriterDataFcts->pStartAU returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /**
-                * If we were reading the clip, we must jump a few AU backward to decode/encode
-                (without writing result) from that point. */
-                if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
-                {
-                    M4OSA_Int32 iTargetCts, iCurrentCts;
-
-                    if( 0
-                        != pC->pC1->
-                        iAudioFrameCts ) /**<don't try to pre-decode if clip is at its beginning.*/
-                    {
-                        /**
-                        * Jump a few AUs backward */
-                        iCurrentCts = pC->pC1->iAudioFrameCts;
-                        iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
-                            * pC->ewc.iSilenceFrameDuration;
-
-                        if( iTargetCts < 0 )
-                        {
-                            iTargetCts = 0; /**< Sanity check */
-                        }
-
-                        err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
-
-                        if( M4NO_ERROR != err )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
-                                M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
-                                err);
-                            return err;
-                        }
-
-                        err = M4VSS3GPP_intClipReadNextAudioFrame(
-                            pC->pC1); /**< read AU where we jumped */
-
-                        M4OSA_TRACE2_3("K .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                            pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
-                            pC->pC1->iAoffset / pC->pC1->scale_audio,
-                            pC->pC1->uiAudioFrameSize);
-
-                        if( M4OSA_ERR_IS_ERROR(err) )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
-                                M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
-                                err);
-                            return err;
-                        }
-
-                        /**
-                        * Decode/encode up to the wanted position */
-                        while( pC->pC1->iAudioFrameCts < iCurrentCts )
-                        {
-                            err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
-
-                            if( M4NO_ERROR != err )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
-                                    M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
-                                    err);
-                                return err;
-                            }
-
-                            /* [Mono] or [Stereo interleaved] : all is in one buffer */
-                            pEncInBuffer.pTableBuffer[0] =
-                                pC->pC1->AudioDecBufferOut.m_dataAddress;
-                            pEncInBuffer.pTableBufferSize[0] =
-                                pC->pC1->AudioDecBufferOut.m_bufferSize;
-                            pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-                            pEncInBuffer.pTableBufferSize[1] = 0;
-
-                            /* Time in ms from data size, because it is PCM16 samples */
-                            frameTimeDelta =
-                                pEncInBuffer.pTableBufferSize[0] / sizeof(short)
-                                / pC->ewc.uiNbChannels;
-
-                            /**
-                            * Prepare output buffer */
-                            pEncOutBuffer.pTableBuffer[0] =
-                                (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-                            pEncOutBuffer.pTableBufferSize[0] = 0;
-
-                            M4OSA_TRACE2_0("L **** pre-encode");
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-                            /*OMX Audio decoder used.
-                            * OMX Audio dec shell does internal buffering and hence does not return
-                            a PCM buffer for every decodeStep call.*
-                            * So PCM buffer sizes might be 0. In this case donot call encode Step*/
-
-                            if( 0 != pEncInBuffer.pTableBufferSize[0] )
-                            {
-
-#endif
-                                /**
-                                * Encode the PCM audio */
-
-                                err =
-                                    pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
-                                    pC->ewc.pAudioEncCtxt,
-                                    &pEncInBuffer, &pEncOutBuffer);
-
-                                if( ( M4NO_ERROR != err)
-                                    && (M4WAR_NO_MORE_AU != err) )
-                                {
-                                    M4OSA_TRACE1_1(
-                                        "M4VSS3GPP_intEditStepAudio():\
-                                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                                        err);
-                                    return err;
-                                }
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                            }
-
-#endif
-
-                            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
-
-                            M4OSA_TRACE2_3(
-                                "M .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                                pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
-                                pC->pC1->iAoffset / pC->pC1->scale_audio,
-                                pC->pC1->uiAudioFrameSize);
-
-                            if( M4OSA_ERR_IS_ERROR(err) )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
-                                    M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
-                                    err);
-                                return err;
-                            }
-                        }
-                    }
-
-                    /**
-                    * Audio is now OK for decoding */
-                    pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
-                }
-
-                /**
-                * Decode the first input audio */
-                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                        M4VSS3GPP_intClipDecodeCurrentAudioFrame(C1) returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Decode the second input audio */
-                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC2);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                        M4VSS3GPP_intClipDecodeCurrentAudioFrame(C2) returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /**
-                * Check both clips decoded the same amount of PCM samples */
-                if( pC->pC1->AudioDecBufferOut.m_bufferSize
-                    != pC->pC2->AudioDecBufferOut.m_bufferSize )
-                {
-                    M4OSA_TRACE1_2(
-                        "ERR : AudioTransition: both clips AU must have the same decoded\
-                        PCM size! pc1 size=0x%x, pC2 size = 0x%x",
-                        pC->pC1->AudioDecBufferOut.m_bufferSize,
-                        pC->pC2->AudioDecBufferOut.m_bufferSize);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-                    /*OMX Audio decoder used.
-                    * OMX Audio dec shell does internal buffering and hence does not return
-                    a PCM buffer for every decodeStep call.*
-                    * So PCM buffer sizes might be 0 or different for clip1 and clip2.
-                    * So no need to return error in this case */
-
-                    M4OSA_TRACE1_2(
-                        "M4VSS3GPP_intEditStepAudio: , pc1 AudBuff size=0x%x,\
-                         pC2 AudBuff size = 0x%x",
-                        pC->pC1->AudioDecBufferOut.m_bufferSize,
-                        pC->pC2->AudioDecBufferOut.m_bufferSize);
-
-#else
-
-                    return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
-
-#endif // M4VSS_SUPPORT_OMX_CODECS
-
-                }
-
-                /**
-                * Apply the audio effect on clip1 */
-                if( pC->iClip1ActiveEffect >= 0 )
-                {
-                    err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
-                        *)pC->pC1->AudioDecBufferOut.m_dataAddress,
-                        pC->pC1->AudioDecBufferOut.m_bufferSize);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                            M4VSS3GPP_intApplyAudioEffect(C1) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Apply the audio effect on clip2 */
-                if( pC->iClip2ActiveEffect >= 0 )
-                {
-                    err = M4VSS3GPP_intApplyAudioEffect(pC, 2, (M4OSA_Int16
-                        *)pC->pC2->AudioDecBufferOut.m_dataAddress,
-                        pC->pC2->AudioDecBufferOut.m_bufferSize);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                            M4VSS3GPP_intApplyAudioEffect(C2) returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Apply the transition effect */
-                err = M4VSS3GPP_intAudioTransition(pC,
-                    (M4OSA_Int16 *)pC->pC1->AudioDecBufferOut.m_dataAddress,
-                    (M4OSA_Int16 *)pC->pC2->AudioDecBufferOut.m_dataAddress,
-                    pC->pC1->AudioDecBufferOut.m_bufferSize);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                        M4VSS3GPP_intAudioTransition returns 0x%x",
-                        err);
-                    return err;
-                }
-
-                /* [Mono] or [Stereo interleaved] : all is in one buffer */
-                pEncInBuffer.pTableBuffer[0] =
-                    pC->pC1->AudioDecBufferOut.m_dataAddress;
-                pEncInBuffer.pTableBufferSize[0] =
-                    pC->pC1->AudioDecBufferOut.m_bufferSize;
-                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
-                pEncInBuffer.pTableBufferSize[1] = 0;
-
-                /* Time in ms from data size, because it is PCM16 samples */
-                frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
-                    / pC->ewc.uiNbChannels;
-
-                /**
-                * Prepare output buffer */
-                pEncOutBuffer.pTableBuffer[0] =
-                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
-                pEncOutBuffer.pTableBufferSize[0] = 0;
-
-                M4OSA_TRACE2_0("N **** blend AUs");
-
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-                /*OMX Audio decoder used.
-                * OMX Audio dec shell does internal buffering and hence does not return
-                a PCM buffer for every decodeStep call.*
-                * So PCM buffer sizes might be 0. In this case donot call encode Step*/
-
-                if( 0 != pEncInBuffer.pTableBufferSize[0] )
-                {
-
-#endif
-
-                    /**
-                    * Encode the PCM audio */
-
-                    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
-                        pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
-
-                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio():\
-                            pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
-                            err);
-                        return err;
-                    }
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                }
-
-#endif
-
-                /**
-                * Set AU cts and size */
-
-                pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
-                    0]; /**< Get the size of encoded data */
-                    pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
-
-                    M4OSA_TRACE2_2("O ---- write : cts  = %ld [ 0x%x ]",
-                        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
-                        pC->ewc.WriterAudioAU.size);
-
-                    /**
-                    * Write the AU */
-                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
-                        pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
-                        &pC->ewc.WriterAudioAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        /*11/12/2008 CR 3283 MMS use case for VideoArtist
-                        the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
-                         file size is reached
-                        The editing is then finished,the warning M4VSS3GPP_WAR_EDITING_DONE
-                        is returned*/
-                        if( M4WAR_WRITER_STOP_REQ == err )
-                        {
-                            M4OSA_TRACE1_0(
-                                "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
-                            return M4VSS3GPP_WAR_EDITING_DONE;
-                        }
-                        else
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                                pWriterDataFcts->pProcessAU returns 0x%x!",
-                                err);
-                            return err;
-                        }
-                    }
-
-                    /**
-                    * Read the next audio frame */
-                    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
-
-                    M4OSA_TRACE2_3("P .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                        pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
-                        pC->pC1->iAoffset / pC->pC1->scale_audio,
-                        pC->pC1->uiAudioFrameSize);
-
-                    if( M4OSA_ERR_IS_ERROR(err) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                            M4VSS3GPP_intClipReadNextAudioFrame(C1) returns 0x%x!",
-                            err);
-                        return err;
-                    }
-                    else
-                    {
-                        M4OSA_ERR secondaryError;
-
-                        /**
-                        * Update current time (to=tc+T) */
-                        pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
-                            / pC->pC1->scale_audio;
-
-                        /**
-                        * Read the next audio frame in the second clip */
-                        secondaryError = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
-
-                        M4OSA_TRACE2_3("Q .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                            pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
-                            pC->pC2->iAoffset / pC->pC2->scale_audio,
-                            pC->pC2->uiAudioFrameSize);
-
-                        if( M4OSA_ERR_IS_ERROR(secondaryError) )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepAudio: TRANSITION:\
-                                M4VSS3GPP_intClipReadNextAudioFrame(C2) returns 0x%x!",
-                                secondaryError);
-                            return err;
-                        }
-
-                        if( ( ( M4WAR_NO_MORE_AU == err)
-                            || (M4WAR_NO_MORE_AU == secondaryError))
-                            && (M4OSA_FALSE == pC->bSupportSilence) )
-                        {
-                            /**
-                            * If output is other than AMR or AAC
-                              (i.e. EVRC,we can't write silence into it)
-                            * So we simply end here.*/
-                            bStopAudio = M4OSA_TRUE;
-                        }
-                    }
-            }
-            break;
-
-            /* ____________ */
-            /*|            |*/
-            /*| ERROR CASE |*/
-            /*|____________|*/
-
-        default:
-
-            M4OSA_TRACE3_1(
-                "M4VSS3GPP_intEditStepAudio: invalid internal state (0x%x), \
-                returning M4VSS3GPP_ERR_INTERNAL_STATE",
-                pC->Astate);
-            return M4VSS3GPP_ERR_INTERNAL_STATE;
-    }
-
-    /**
-    * Check if we are forced to stop audio */
-    if( M4OSA_TRUE == bStopAudio )
-    {
-        /**
-        * Audio is done for this clip */
-        err = M4VSS3GPP_intReachedEndOfAudio(pC);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCheckAudioMode()
- * @brief    Check which audio process mode we must use, depending on the output CTS.
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
-                                             *pC )
-{
-    M4OSA_ERR err;
-    const M4OSA_Int32 TD = pC->pTransitionList[pC->
-        uiCurrentClip].uiTransitionDuration; /**< Transition duration */
-
-    const M4VSS3GPP_EditAudioState previousAstate = pC->Astate;
-
-    /**
-    * Check if Clip1 is on its begin cut, or in its begin effect or end effect zone */
-    M4VSS3GPP_intCheckAudioEffects(pC, 1);
-
-    /**
-    * Check if we are in the transition with next clip */
-    if( ( TD > 0) && ((M4OSA_Int32)(pC->ewc.dATo - pC->pC1->iAoffset
-        / pC->pC1->scale_audio + 0.5) >= (pC->pC1->iEndTime - TD)) )
-    {
-        /**
-        * We are in a transition */
-        pC->Astate = M4VSS3GPP_kEditAudioState_TRANSITION;
-        pC->bTransitionEffect = M4OSA_TRUE;
-
-        /**
-        * Do we enter the transition section ? */
-        if( M4VSS3GPP_kEditAudioState_TRANSITION != previousAstate )
-        {
-            /**
-            * Open second clip for transition, if not yet opened */
-            if( M4OSA_NULL == pC->pC2 )
-            {
-                err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
-                    &pC->pClipList[pC->uiCurrentClip + 1]);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intOpenClip() returns 0x%x!",
-                        err);
-                    return err;
-                }
-
-                /**
-                * In case of short transition and bad luck (...), there may be no video AU
-                * in the transition. In that case, the second clip has not been opened.
-                * So we must update the video offset here. */
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                /**< Add current video output CTS to the clip offset */
-                pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
-            }
-
-            /**
-            * Add current audio output CTS to the clip offset
-            * (video offset has already been set when doing the video transition) */
-            pC->pC2->iAoffset +=
-                (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
-
-            /**
-            * 2005-03-24: BugFix for audio-video synchro:
-            * There may be a portion of the duration of an audio AU of desynchro at each assembly.
-            * It leads to an audible desynchro when there are a lot of clips assembled.
-            * This bug fix allows to resynch the audio track when the delta is higher
-            * than one audio AU duration.
-            * We Step one AU in the second clip and we change the audio offset accordingly. */
-            if( ( pC->pC2->iAoffset
-                - (M4OSA_Int32)(pC->pC2->iVoffset *pC->pC2->scale_audio + 0.5))
-                    > pC->ewc.iSilenceFrameDuration )
-            {
-                /**
-                * Advance one AMR frame */
-                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
-
-                M4OSA_TRACE2_3("Z .... read  : cts  = %.0f + %.0f [ 0x%x ]",
-                    pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
-                    pC->pC2->iAoffset / pC->pC2->scale_audio,
-                    pC->pC2->uiAudioFrameSize);
-
-                if( M4OSA_ERR_IS_ERROR(err) )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intCheckAudioMode:\
-                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
-                        err);
-                    return err;
-                }
-                /**
-                * Update audio offset accordingly*/
-                pC->pC2->iAoffset -= pC->ewc.iSilenceFrameDuration;
-            }
-        }
-
-        /**
-        * Check begin and end effects for clip2 */
-        M4VSS3GPP_intCheckAudioEffects(pC, 2);
-    }
-    else
-    {
-        /**
-        * We are not in a transition */
-        pC->bTransitionEffect = M4OSA_FALSE;
-
-        /**
-        * Check if current mode is Read/Write or Decode/Encode */
-        if( pC->iClip1ActiveEffect >= 0 )
-        {
-            pC->Astate = M4VSS3GPP_kEditAudioState_DECODE_ENCODE;
-        }
-        else
-        {
-            pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
-        }
-    }
-
-    /**
-    * Check if we create/destroy an encoder */
-    if( ( M4VSS3GPP_kEditAudioState_READ_WRITE == previousAstate)
-        && /**< read mode */
-        (M4VSS3GPP_kEditAudioState_READ_WRITE != pC->Astate) ) /**< encode mode */
-    {
-        M4OSA_UInt32 uiAudioBitrate;
-
-        /* Compute max bitrate depending on input files bitrates and transitions */
-        if( pC->Astate == M4VSS3GPP_kEditAudioState_TRANSITION )
-        {
-            /* Max of the two blended files */
-            if( pC->pC1->pSettings->ClipProperties.uiAudioBitrate
-                > pC->pC2->pSettings->ClipProperties.uiAudioBitrate )
-                uiAudioBitrate =
-                pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
-            else
-                uiAudioBitrate =
-                pC->pC2->pSettings->ClipProperties.uiAudioBitrate;
-        }
-        else
-        {
-            /* Same as input file */
-            uiAudioBitrate = pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
-        }
-
-        /**
-        * Create the encoder */
-        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
-            uiAudioBitrate);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intResetAudioEncoder() returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intCheckAudioMode(): returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intCheckAudioEffects()
- * @brief    Check which audio effect must be applied at the current time
- ******************************************************************************
- */
-static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
-                                                 *pC, M4OSA_UInt8 uiClipNumber )
-{
-    M4OSA_UInt8 uiClipIndex;
-    M4OSA_UInt8 uiFxIndex;
-    M4VSS3GPP_ClipContext *pClip;
-    M4VSS3GPP_EffectSettings *pFx;
-    M4OSA_Int32 BC, EC;
-    M4OSA_Int8 *piClipActiveEffect;
-    M4OSA_Int32 t;
-
-    if( 1 == uiClipNumber )
-    {
-        uiClipIndex = pC->uiCurrentClip;
-        pClip = pC->pC1;
-        piClipActiveEffect = &(pC->iClip1ActiveEffect);
-    }
-    else /**< (2 == uiClipNumber) */
-    {
-        uiClipIndex = pC->uiCurrentClip + 1;
-        pClip = pC->pC2;
-        piClipActiveEffect = &(pC->iClip2ActiveEffect);
-    }
-
-    /**
-    * Shortcuts for code readability */
-    BC = pClip->iActualAudioBeginCut;
-    EC = pClip->iEndTime;
-
-    /**
-    Change the absolut time to clip related time
-     RC t = (M4OSA_Int32)(pC->ewc.dATo - pClip->iAoffset/pClip->scale_audio + 0.5);
-    < rounding */;
-    t = (M4OSA_Int32)(pC->ewc.dATo/*- pClip->iAoffset/pClip->scale_audio*/
-        + 0.5); /**< rounding */
-    ;
-
-    /**
-    * Default: no effect active */
-    *piClipActiveEffect = -1;
-
-    /**
-    * Check the three effects */
-    // RC    for (uiFxIndex=0; uiFxIndex<pC->pClipList[uiClipIndex].nbEffects; uiFxIndex++)
-    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
-    {
-        /** Shortcut, reverse order because of priority between effects
-        ( EndEffect always clean ) */
-        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
-
-        if( M4VSS3GPP_kAudioEffectType_None != pFx->AudioEffectType )
-        {
-            /**
-            * Check if there is actually a video effect */
-            if( ( t >= (M4OSA_Int32)(/*BC +*/pFx->uiStartTime))
-                && /**< Are we after the start time of the effect? */
-                (t < (M4OSA_Int32)(/*BC +*/pFx->uiStartTime + pFx->
-                uiDuration)) ) /**< Are we into the effect duration? */
-            {
-                /**
-                * Set the active effect */
-                *piClipActiveEffect = pC->nbEffects - 1 - uiFxIndex;
-
-                /**
-                * The first effect has the highest priority, then the second one,
-                  then the thirs one.
-                * Hence, as soon as we found an active effect, we can get out of this loop */
-                uiFxIndex = pC->nbEffects; /** get out of the for loop */
-            }
-            /**
-            * Bugfix: The duration of the end effect has been set according to the
-                      announced clip duration.
-            * If the announced duration is smaller than the real one, the end effect
-                      won't be applied at
-            * the very end of the clip. To solve this issue we force the end effect. */
-
-        }
-    }
-
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intApplyAudioEffect()
- * @brief    Apply audio effect to pPCMdata
- * @param   pC            (IN/OUT) Internal edit context
- * @param   uiClip1orClip2    (IN/OUT) 1 for first clip, 2 for second clip
- * @param    pPCMdata    (IN/OUT) Input and Output PCM audio data
- * @param    uiPCMsize    (IN)     Size of pPCMdata
- * @return    M4NO_ERROR:                        No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
-                                               *pC, M4OSA_UInt8 uiClip1orClip2,
-                                               M4OSA_Int16 *pPCMdata,
-                                               M4OSA_UInt32 uiPCMsize )
-{
-    M4VSS3GPP_ClipContext *pClip;
-    M4VSS3GPP_ClipSettings *pClipSettings;
-    M4VSS3GPP_EffectSettings *pFx;
-    M4OSA_Int32
-        i32sample; /**< we will cast each Int16 sample into this Int32 variable */
-    M4OSA_Int32 iPos;
-    M4OSA_Int32 iDur;
-
-    M4OSA_DEBUG_IF2(( 1 != uiClip1orClip2) && (2 != uiClip1orClip2),
-        M4ERR_PARAMETER,
-        "M4VSS3GPP_intBeginAudioEffect: uiClip1orClip2 invalid");
-
-    if( 1 == uiClip1orClip2 )
-    {
-        pClip = pC->pC1;
-        pClipSettings = &(pC->pClipList[pC->
-            uiCurrentClip]); /**< Get a shortcut to the clip settings */
-        // RC        pFx = &(pClipSettings->Effects[pC->iClip1ActiveEffect]);/**< Get a shortcut
-        //                                                                to the active effect */
-        pFx = &(pC->
-            pEffectsList[pC->
-            iClip1ActiveEffect]); /**< Get a shortcut to the active effect */
-        M4OSA_DEBUG_IF2(( pC->iClip1ActiveEffect < 0)
-            || (pC->iClip1ActiveEffect > 2), M4ERR_PARAMETER,
-            "M4VSS3GPP_intApplyAudioEffect: iClip1ActiveEffect invalid");
-    }
-    else /**< if (2==uiClip1orClip2) */
-    {
-        pClip = pC->pC2;
-        pClipSettings = &(pC->pClipList[pC->uiCurrentClip
-            + 1]); /**< Get a shortcut to the clip settings */
-        // RC        pFx = &(pClipSettings->Effects[pC->iClip2ActiveEffect]);/**< Get a shortcut
-        //                                                                to the active effect */
-        pFx = &(pC->
-            pEffectsList[pC->
-            iClip2ActiveEffect]); /**< Get a shortcut to the active effect */
-        M4OSA_DEBUG_IF2(( pC->iClip2ActiveEffect < 0)
-            || (pC->iClip2ActiveEffect > 2), M4ERR_PARAMETER,
-            "M4VSS3GPP_intApplyAudioEffect: iClip2ActiveEffect invalid");
-    }
-
-    iDur = (M4OSA_Int32)pFx->uiDuration;
-
-    /**
-    * Compute how far from the beginning of the effect we are, in clip-base time.
-    * It is done with integers because the offset and begin cut have been rounded already. */
-    iPos =
-        (M4OSA_Int32)(pC->ewc.dATo + 0.5 - pClip->iAoffset / pClip->scale_audio)
-        - pClip->iActualAudioBeginCut - pFx->uiStartTime;
-
-    /**
-    * Sanity check */
-    if( iPos > iDur )
-    {
-        iPos = iDur;
-    }
-    else if( iPos < 0 )
-    {
-        iPos = 0;
-    }
-
-    /**
-    * At this point, iPos is the effect progress, in a 0 to iDur base */
-    switch( pFx->AudioEffectType )
-    {
-        case M4VSS3GPP_kAudioEffectType_FadeIn:
-
-            /**
-            * Original samples are signed 16bits.
-            * We convert it to signed 32bits and multiply it by iPos.
-            * So we must assure that iPos is not higher that 16bits max.
-            * iPos max value is iDur, so we test iDur. */
-            while( iDur > PWR_FXP_FRACT_MAX )
-            {
-                iDur >>=
-                    2; /**< divide by 2 would be more logical (instead of 4),
-                       but we have enough dynamic..) */
-                iPos >>= 2; /**< idem */
-            }
-
-            /**
-            * From buffer size (bytes) to number of sample (int16): divide by two */
-            uiPCMsize >>= 1;
-
-            /**
-            * Loop on samples */
-            while( uiPCMsize-- > 0 ) /**< decrementing to optimize */
-            {
-                i32sample = *pPCMdata;
-                i32sample *= iPos;
-                i32sample /= iDur;
-                *pPCMdata++ = (M4OSA_Int16)i32sample;
-            }
-
-            break;
-
-        case M4VSS3GPP_kAudioEffectType_FadeOut:
-
-            /**
-            * switch from 0->Dur to Dur->0 in order to do fadeOUT instead of fadeIN */
-            iPos = iDur - iPos;
-
-            /**
-            * Original samples are signed 16bits.
-            * We convert it to signed 32bits and multiply it by iPos.
-            * So we must assure that iPos is not higher that 16bits max.
-            * iPos max value is iDur, so we test iDur. */
-            while( iDur > PWR_FXP_FRACT_MAX )
-            {
-                iDur >>=
-                    2; /**< divide by 2 would be more logical (instead of 4),
-                       but we have enough dynamic..) */
-                iPos >>= 2; /**< idem */
-            }
-
-            /**
-            * From buffer size (bytes) to number of sample (int16): divide by two */
-            uiPCMsize >>= 1;
-
-            /**
-            * Loop on samples, apply the fade factor on each */
-            while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
-            {
-                i32sample = *pPCMdata;
-                i32sample *= iPos;
-                i32sample /= iDur;
-                *pPCMdata++ = (M4OSA_Int16)i32sample;
-            }
-
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intApplyAudioEffect: unknown audio effect type (0x%x),\
-                returning M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE",
-                pFx->AudioEffectType);
-            return M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE;
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intApplyAudioEffect: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAudioTransition()
- * @brief    Apply transition effect to two PCM buffer
- * @note    The result of the transition is put in the first buffer.
- *          I know it's not beautiful, but it fits my current needs, and it's efficient!
- *          So why bother with a third output buffer?
- * @param   pC            (IN/OUT) Internal edit context
- * @param    pPCMdata1    (IN/OUT) First input and Output PCM audio data
- * @param    pPCMdata2    (IN) Second input PCM audio data
- * @param    uiPCMsize    (IN) Size of both PCM buffers
- * @return    M4NO_ERROR:                        No error
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
-                                              *pC, M4OSA_Int16 *pPCMdata1,
-                                              M4OSA_Int16 *pPCMdata2,
-                                              M4OSA_UInt32 uiPCMsize )
-{
-    M4OSA_Int32 i32sample1,
-        i32sample2; /**< we will cast each Int16 sample into this Int32 variable */
-    M4OSA_Int32 iPos1, iPos2;
-    M4OSA_Int32 iDur = (M4OSA_Int32)pC->
-        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
-
-    /**
-    * Compute how far from the end cut we are, in clip-base time.
-    * It is done with integers because the offset and begin cut have been rounded already. */
-    iPos1 = pC->pC1->iEndTime - (M4OSA_Int32)(pC->ewc.dATo
-        + 0.5 - pC->pC1->iAoffset / pC->pC1->scale_audio);
-
-    /**
-    * Sanity check */
-    if( iPos1 > iDur )
-    {
-        iPos1 = iDur;
-    }
-    else if( iPos1 < 0 )
-    {
-        iPos1 = 0;
-    }
-
-    /**
-    * Position of second clip in the transition */
-    iPos2 = iDur - iPos1;
-
-    /**
-    * At this point, iPos2 is the transition progress, in a 0 to iDur base.
-    * iPos1 is the transition progress, in a iDUr to 0 base. */
-    switch( pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType )
-    {
-        case M4VSS3GPP_kAudioTransitionType_CrossFade:
-
-            /**
-            * Original samples are signed 16bits.
-            * We convert it to signed 32bits and multiply it by iPos.
-            * So we must assure that iPos is not higher that 16bits max.
-            * iPos max value is iDur, so we test iDur. */
-            while( iDur > PWR_FXP_FRACT_MAX )
-            {
-                iDur >>=
-                    2; /**< divide by 2 would be more logical (instead of 4),
-                       but we have enough dynamic..) */
-                iPos1 >>= 2; /**< idem */
-                iPos2 >>= 2; /**< idem */
-            }
-
-            /**
-            * From buffer size (bytes) to number of sample (int16): divide by two */
-            uiPCMsize >>= 1;
-
-            /**
-            * Loop on samples, apply the fade factor on each */
-            while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
-            {
-                i32sample1 = *pPCMdata1; /**< Get clip1 sample */
-                i32sample1 *= iPos1;     /**< multiply by fade numerator */
-                i32sample1 /= iDur;      /**< divide by fade denominator */
-
-                i32sample2 = *pPCMdata2; /**< Get clip2 sample */
-                i32sample2 *= iPos2;     /**< multiply by fade numerator */
-                i32sample2 /= iDur;      /**< divide by fade denominator */
-
-                *pPCMdata1++ = (M4OSA_Int16)(i32sample1
-                    + i32sample2); /**< mix the two samples */
-                pPCMdata2++; /**< don't forget to increment the second buffer */
-            }
-            break;
-
-        case M4VSS3GPP_kAudioTransitionType_None:
-            /**
-            * This is a stupid-non optimized version of the None transition...
-            * We copy the PCM frames */
-            if( iPos1 < (iDur >> 1) ) /**< second half of transition */
-            {
-                /**
-                * Copy the input PCM to the output buffer */
-                memcpy((void *)pPCMdata1,
-                    (void *)pPCMdata2, uiPCMsize);
-            }
-            /**
-            * the output must be put in the first buffer.
-            * For the first half of the non-transition it's already the case!
-            * So we have nothing to do here...
-            */
-
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intAudioTransition: unknown transition type (0x%x),\
-                returning M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE",
-                pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType);
-            return M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE;
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAudioTransition: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
- * @brief    Reset the audio encoder (Create it if needed)
- * @note
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder( M4VSS3GPP_EncodeWriteContext *pC_ewc,
-                                          M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
-                                          M4OSA_UInt32 uiAudioBitrate )
-{
-    M4OSA_ERR err;
-
-    /**
-    * If an encoder already exist, we destroy it */
-    if( M4OSA_NULL != pC_ewc->pAudioEncCtxt )
-    {
-        err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctClose(
-            pC_ewc->pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
-                err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctCleanUp(
-            pC_ewc->pAudioEncCtxt);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intResetAudioEncoder:\
-                pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",    err);
-            /**< don't return, we still have stuff to free */
-        }
-
-        pC_ewc->pAudioEncCtxt = M4OSA_NULL;
-    }
-
-    /**
-    * Creates a new encoder  */
-    switch( pC_ewc->AudioStreamType )
-    {
-            //EVRC
-            //        case M4SYS_kEVRC:
-            //
-            //            err = M4VSS3GPP_setCurrentAudioEncoder(&pC->ShellAPI,
-            //                                                   pC_ewc->AudioStreamType);
-            //            M4ERR_CHECK_RETURN(err);
-            //
-            //            pC_ewc->AudioEncParams.Format = M4ENCODER_kEVRC;
-            //            pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-            //            pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
-            //            pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_EVRC_DEFAULT_BITRATE;
-            //            break;
-
-        case M4SYS_kAMR:
-
-            err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
-                pC_ewc->AudioStreamType);
-            M4ERR_CHECK_RETURN(err);
-
-            pC_ewc->AudioEncParams.Format = M4ENCODER_kAMRNB;
-            pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-            pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
-            pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
-            pC_ewc->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
-            break;
-
-        case M4SYS_kAAC:
-
-            err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
-                pC_ewc->AudioStreamType);
-            M4ERR_CHECK_RETURN(err);
-
-            pC_ewc->AudioEncParams.Format = M4ENCODER_kAAC;
-
-            switch( pC_ewc->uiSamplingFrequency )
-            {
-                case 8000:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
-                    break;
-
-                case 16000:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
-                    break;
-
-                case 22050:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
-                    break;
-
-                case 24000:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
-                    break;
-
-                case 32000:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
-                    break;
-
-                case 44100:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
-                    break;
-
-                case 48000:
-                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
-                    break;
-
-                default:
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intCreateAudioEncoder: invalid input AAC sampling frequency\
-                        (%d Hz), returning M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED",
-                        pC_ewc->uiSamplingFrequency);
-                    return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
-            }
-            pC_ewc->AudioEncParams.ChannelNum = (pC_ewc->uiNbChannels == 1)
-                ? M4ENCODER_kMono : M4ENCODER_kStereo;
-            pC_ewc->AudioEncParams.SpecifParam.AacParam.Regulation =
-                M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
-            /* unused */
-            pC_ewc->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
-            pC_ewc->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
-            pC_ewc->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
-            pC_ewc->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
-            /* TODO change into highspeed asap */
-            pC_ewc->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
-                M4OSA_FALSE;
-
-            /* Quantify value (ceil one) */
-            if( uiAudioBitrate <= 16000 )
-                pC_ewc->AudioEncParams.Bitrate = 16000;
-
-            else if( uiAudioBitrate <= 24000 )
-                pC_ewc->AudioEncParams.Bitrate = 24000;
-
-            else if( uiAudioBitrate <= 32000 )
-                pC_ewc->AudioEncParams.Bitrate = 32000;
-
-            else if( uiAudioBitrate <= 48000 )
-                pC_ewc->AudioEncParams.Bitrate = 48000;
-
-            else if( uiAudioBitrate <= 64000 )
-                pC_ewc->AudioEncParams.Bitrate = 64000;
-
-            else
-                pC_ewc->AudioEncParams.Bitrate = 96000;
-
-            /* Special requirement of our encoder */
-            if( ( pC_ewc->uiNbChannels == 2)
-                && (pC_ewc->AudioEncParams.Bitrate < 32000) )
-                pC_ewc->AudioEncParams.Bitrate = 32000;
-
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intResetAudioEncoder: Undefined output audio format (%d),\
-                returning M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT",
-                pC_ewc->AudioStreamType);
-            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
-    }
-
-    /* Initialise the audio encoder */
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-    M4OSA_TRACE3_1(
-        "M4VSS3GPP_intResetAudioEncoder:\
-        pAudioEncoderGlobalFcts->pFctInit called with userdata 0x%x",
-        pC_ShellAPI->pCurrentAudioEncoderUserData);
-    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
-        pC_ShellAPI->pCurrentAudioEncoderUserData);
-
-#else
-
-    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
-        M4OSA_NULL /* no HW encoder */);
-
-#endif
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
-            err);
-        return err;
-    }
-
-    /* Open the audio encoder */
-    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctOpen(pC_ewc->pAudioEncCtxt,
-        &pC_ewc->AudioEncParams, &pC_ewc->pAudioEncDSI,
-        M4OSA_NULL /* no grabbing */);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
-            err);
-        return err;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intResetAudioEncoder: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
deleted file mode 100755
index f19f412..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
+++ /dev/null
@@ -1,3955 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4VSS3GPP_EditVideo.c
- * @brief    Video Studio Service 3GPP edit API implementation.
- * @note
- ******************************************************************************
- */
-#undef M4OSA_TRACE_LEVEL
-#define M4OSA_TRACE_LEVEL 1
-
-/****************/
-/*** Includes ***/
-/****************/
-
-#include "NXPSW_CompilerSwitches.h"
-/**
- * Our header */
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_InternalTypes.h"
-#include "M4VSS3GPP_InternalFunctions.h"
-#include "M4VSS3GPP_InternalConfig.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-
-// StageFright encoders require %16 resolution
-#include "M4ENCODER_common.h"
-/**
- * OSAL headers */
-#include "M4OSA_Memory.h" /**< OSAL memory management */
-#include "M4OSA_Debug.h"  /**< OSAL debug management */
-
-/**
- * component includes */
-#include "M4VFL_transition.h" /**< video effects */
-
-/*for transition behaviour*/
-#include <math.h>
-#include "M4AIR_API.h"
-#include "M4VSS3GPP_Extended_API.h"
-/** Determine absolute value of a. */
-#define M4xVSS_ABS(a) ( ( (a) < (0) ) ? (-(a)) : (a) )
-#define Y_PLANE_BORDER_VALUE    0x00
-#define U_PLANE_BORDER_VALUE    0x80
-#define V_PLANE_BORDER_VALUE    0x80
-
-/************************************************************************/
-/* Static local functions                                               */
-/************************************************************************/
-
-static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
-    M4VSS3GPP_InternalEditContext *pC );
-static M4OSA_Void
-M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
-                               M4OSA_UInt8 uiClipNumber );
-static M4OSA_ERR M4VSS3GPP_intApplyVideoEffect(
-          M4VSS3GPP_InternalEditContext *pC, M4VIFI_ImagePlane *pPlaneIn,
-          M4VIFI_ImagePlane *pPlaneOut, M4OSA_Bool bSkipFramingEffect);
-
-static M4OSA_ERR
-M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
-                             M4VIFI_ImagePlane *pPlaneOut );
-
-static M4OSA_Void
-M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
-                            M4SYS_AccessUnit *pAU );
-static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
-                                                  M4OSA_UInt8 uiCts );
-static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
-                                           M4OSA_UInt32 uiCtsSec );
-static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
-                                           M4OSA_UInt32 *pCtsSec );
-static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
-                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight );
-static M4OSA_ERR M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420(
-          M4OSA_Void* pFileIn, M4OSA_FileReadPointer* pFileReadPtr,
-          M4VIFI_ImagePlane* pImagePlanes,
-          M4OSA_UInt32 width,M4OSA_UInt32 height);
-static M4OSA_ERR M4VSS3GPP_intApplyRenderingMode(
-          M4VSS3GPP_InternalEditContext *pC,
-          M4xVSS_MediaRendering renderingMode,
-          M4VIFI_ImagePlane* pInplane,
-          M4VIFI_ImagePlane* pOutplane);
-
-static M4OSA_ERR M4VSS3GPP_intSetYuv420PlaneFromARGB888 (
-                                        M4VSS3GPP_InternalEditContext *pC,
-                                        M4VSS3GPP_ClipContext* pClipCtxt);
-static M4OSA_ERR M4VSS3GPP_intRenderFrameWithEffect(
-                                             M4VSS3GPP_InternalEditContext *pC,
-                                             M4VSS3GPP_ClipContext* pClipCtxt,
-                                             M4_MediaTime ts,
-                                             M4OSA_Bool bIsClip1,
-                                             M4VIFI_ImagePlane *pResizePlane,
-                                             M4VIFI_ImagePlane *pPlaneNoResize,
-                                             M4VIFI_ImagePlane *pPlaneOut);
-
-static M4OSA_ERR M4VSS3GPP_intRotateVideo(M4VIFI_ImagePlane* pPlaneIn,
-                                      M4OSA_UInt32 rotationDegree);
-
-static M4OSA_ERR M4VSS3GPP_intSetYUV420Plane(M4VIFI_ImagePlane* planeIn,
-                                      M4OSA_UInt32 width, M4OSA_UInt32 height);
-
-static M4OSA_ERR M4VSS3GPP_intApplyVideoOverlay (
-                                      M4VSS3GPP_InternalEditContext *pC,
-                                      M4VIFI_ImagePlane *pPlaneIn,
-                                      M4VIFI_ImagePlane *pPlaneOut);
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
- * @brief    One step of video processing
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-    M4OSA_Int32 iCts, iNextCts;
-    M4ENCODER_FrameMode FrameMode;
-    M4OSA_Bool bSkipFrame;
-    M4OSA_UInt16 offset;
-
-    /**
-     * Check if we reached end cut. Decorrelate input and output encoding
-     * timestamp to handle encoder prefetch
-     */
-    if ( ((M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset
-        + pC->iInOutTimeOffset) >= pC->pC1->iEndTime )
-    {
-        /* Re-adjust video to precise cut time */
-        pC->iInOutTimeOffset = ((M4OSA_Int32)(pC->ewc.dInputVidCts))
-            - pC->pC1->iVoffset + pC->iInOutTimeOffset - pC->pC1->iEndTime;
-        if ( pC->iInOutTimeOffset < 0 ) {
-            pC->iInOutTimeOffset = 0;
-        }
-
-        /**
-        * Video is done for this clip */
-        err = M4VSS3GPP_intReachedEndOfVideo(pC);
-
-        /* RC: to know when a file has been processed */
-        if (M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP)
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intReachedEndOfVideo returns 0x%x",
-                err);
-        }
-
-        return err;
-    }
-
-    /* Don't change the states if we are in decodeUpTo() */
-    if ( (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
-        && (( pC->pC2 == M4OSA_NULL)
-        || (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)) )
-    {
-        /**
-        * Check Video Mode, depending on the current output CTS */
-        err = M4VSS3GPP_intCheckVideoMode(
-            pC); /**< This function change the pC->Vstate variable! */
-
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intCheckVideoMode returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-
-    switch( pC->Vstate )
-    {
-        /* _________________ */
-        /*|                 |*/
-        /*| READ_WRITE MODE |*/
-        /*|_________________|*/
-
-        case M4VSS3GPP_kEditVideoState_READ_WRITE:
-        case M4VSS3GPP_kEditVideoState_AFTER_CUT:
-            {
-                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo READ_WRITE");
-
-                bSkipFrame = M4OSA_FALSE;
-
-                /**
-                * If we were decoding the clip, we must jump to be sure
-                * to get to the good position. */
-                if( M4VSS3GPP_kClipStatus_READ != pC->pC1->Vstatus )
-                {
-                    /**
-                    * Jump to target video time (tc = to-T) */
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                iCts = (M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset;
-                    err = pC->pC1->ShellAPI.m_pReader->m_pFctJump(
-                        pC->pC1->pReaderContext,
-                        (M4_StreamHandler *)pC->pC1->pVideoStream, &iCts);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo:\
-                            READ_WRITE: m_pReader->m_pFctJump(V1) returns 0x%x!",
-                            err);
-                        return err;
-                    }
-
-                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-                        pC->pC1->pReaderContext,
-                        (M4_StreamHandler *)pC->pC1->pVideoStream,
-                        &pC->pC1->VideoAU);
-
-                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo:\
-                            READ_WRITE: m_pReader->m_pFctGetNextAu returns 0x%x!",
-                            err);
-                        return err;
-                    }
-
-                    M4OSA_TRACE2_3("A .... read  : cts  = %.0f + %ld [ 0x%x ]",
-                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
-                        pC->pC1->VideoAU.m_size);
-
-                    /* This frame has been already written in BEGIN CUT step -> skip it */
-                    if( pC->pC1->VideoAU.m_CTS == iCts
-                        && pC->pC1->iVideoRenderCts >= iCts )
-                    {
-                        bSkipFrame = M4OSA_TRUE;
-                    }
-                }
-
-                /* This frame has been already written in BEGIN CUT step -> skip it */
-                if( ( pC->Vstate == M4VSS3GPP_kEditVideoState_AFTER_CUT)
-                    && (pC->pC1->VideoAU.m_CTS
-                    + pC->pC1->iVoffset <= pC->ewc.WriterVideoAU.CTS) )
-                {
-                    bSkipFrame = M4OSA_TRUE;
-                }
-
-                /**
-                * Remember the clip reading state */
-                pC->pC1->Vstatus = M4VSS3GPP_kClipStatus_READ;
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                // Rounding is to compensate reader imprecision (m_CTS is actually an integer)
-                iCts = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pC->pC1->iVoffset - 1;
-                iNextCts = iCts + ((M4OSA_Int32)pC->dOutputFrameDuration) + 1;
-                /* Avoid to write a last frame of duration 0 */
-                if( iNextCts > pC->pC1->iEndTime )
-                    iNextCts = pC->pC1->iEndTime;
-
-                /**
-                * If the AU is good to be written, write it, else just skip it */
-                if( ( M4OSA_FALSE == bSkipFrame)
-                    && (( pC->pC1->VideoAU.m_CTS >= iCts)
-                    && (pC->pC1->VideoAU.m_CTS < iNextCts)
-                    && (pC->pC1->VideoAU.m_size > 0)) )
-                {
-                    /**
-                    * Get the output AU to write into */
-                    err = pC->ShellAPI.pWriterDataFcts->pStartAU(
-                        pC->ewc.p3gpWriterContext,
-                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
-                        &pC->ewc.WriterVideoAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
-                            pWriterDataFcts->pStartAU(Video) returns 0x%x!",
-                            err);
-                        return err;
-                    }
-
-                    /**
-                    * Copy the input AU to the output AU */
-                    pC->ewc.WriterVideoAU.attribute = pC->pC1->VideoAU.m_attribute;
-                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                    pC->ewc.WriterVideoAU.CTS = (M4OSA_Time)pC->pC1->VideoAU.m_CTS +
-                        (M4OSA_Time)pC->pC1->iVoffset;
-                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
-                    offset = 0;
-                    /* for h.264 stream do not read the 1st 4 bytes as they are header
-                     indicators */
-                    if( pC->pC1->pVideoStream->m_basicProperties.m_streamType
-                        == M4DA_StreamTypeVideoMpeg4Avc )
-                        offset = 4;
-
-                    pC->ewc.WriterVideoAU.size = pC->pC1->VideoAU.m_size - offset;
-                    if( pC->ewc.WriterVideoAU.size > pC->ewc.uiVideoMaxAuSize )
-                    {
-                        M4OSA_TRACE1_2(
-                            "M4VSS3GPP_intEditStepVideo: READ_WRITE: AU size greater than\
-                             MaxAuSize (%d>%d)! returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
-                            pC->ewc.WriterVideoAU.size, pC->ewc.uiVideoMaxAuSize);
-                        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
-                    }
-
-                    memcpy((void *)pC->ewc.WriterVideoAU.dataAddress,
-                        (void *)(pC->pC1->VideoAU.m_dataAddress + offset),
-                        (pC->ewc.WriterVideoAU.size));
-
-                    /**
-                    * Update time info for the Counter Time System to be equal to the bit
-                    -stream time*/
-                    M4VSS3GPP_intUpdateTimeInfo(pC, &pC->ewc.WriterVideoAU);
-                    M4OSA_TRACE2_2("B ---- write : cts  = %lu [ 0x%x ]",
-                        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
-
-                    /**
-                    * Write the AU */
-                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
-                        pC->ewc.p3gpWriterContext,
-                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
-                        &pC->ewc.WriterVideoAU);
-
-                    if( M4NO_ERROR != err )
-                    {
-                        /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
-                         file size is reached
-                        The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
-                        is returned*/
-                        if( M4WAR_WRITER_STOP_REQ == err )
-                        {
-                            M4OSA_TRACE1_0(
-                                "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
-                            return M4VSS3GPP_WAR_EDITING_DONE;
-                        }
-                        else
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
-                                pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
-                                err);
-                            return err;
-                        }
-                    }
-
-                    /**
-                    * Read next AU for next step */
-                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-                        pC->pC1->pReaderContext,
-                        (M4_StreamHandler *)pC->pC1->pVideoStream,
-                        &pC->pC1->VideoAU);
-
-                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
-                            m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
-                            err);
-                        return err;
-                    }
-
-                    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
-                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
-                        pC->pC1->VideoAU.m_size);
-                }
-                else
-                {
-                    /**
-                    * Decide wether to read or to increment time increment */
-                    if( ( pC->pC1->VideoAU.m_size == 0)
-                        || (pC->pC1->VideoAU.m_CTS >= iNextCts) )
-                    {
-                        /*Increment time by the encoding period (NO_MORE_AU or reader in advance */
-                       // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                       pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
-
-                        /* Switch (from AFTER_CUT) to normal mode because time is
-                        no more frozen */
-                        pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
-                    }
-                    else
-                    {
-                        /* In other cases (reader late), just let the reader catch up
-                         pC->ewc.dVTo */
-                        err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
-                            pC->pC1->pReaderContext,
-                            (M4_StreamHandler *)pC->pC1->pVideoStream,
-                            &pC->pC1->VideoAU);
-
-                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
-                                m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
-                                err);
-                            return err;
-                        }
-
-                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %ld [ 0x%x ]",
-                            pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
-                            pC->pC1->VideoAU.m_size);
-                    }
-                }
-            }
-            break;
-
-            /* ____________________ */
-            /*|                    |*/
-            /*| DECODE_ENCODE MODE |*/
-            /*|   BEGIN_CUT MODE   |*/
-            /*|____________________|*/
-
-        case M4VSS3GPP_kEditVideoState_DECODE_ENCODE:
-        case M4VSS3GPP_kEditVideoState_BEGIN_CUT:
-            {
-                M4OSA_TRACE3_0(
-                    "M4VSS3GPP_intEditStepVideo DECODE_ENCODE / BEGIN_CUT");
-
-            if ((pC->pC1->pSettings->FileType ==
-                     M4VIDEOEDITING_kFileType_ARGB8888) &&
-                (M4OSA_FALSE ==
-                    pC->pC1->pSettings->ClipProperties.bSetImageData)) {
-
-                err = M4VSS3GPP_intSetYuv420PlaneFromARGB888(pC, pC->pC1);
-                if( M4NO_ERROR != err ) {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
-                        M4VSS3GPP_intSetYuv420PlaneFromARGB888 err=%x", err);
-                    return err;
-                }
-            }
-                /**
-                * Decode the video up to the target time
-                (will jump to the previous RAP if needed ) */
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, (M4OSA_Int32)pC->ewc.dInputVidCts);
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
-                        M4VSS3GPP_intDecodeVideoUpToCts returns err=0x%x",
-                        err);
-                    return err;
-                }
-
-                /* If the decoding is not completed, do one more step with time frozen */
-                if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
-                {
-                    return M4NO_ERROR;
-                }
-
-                /**
-                * Reset the video pre-processing error before calling the encoder */
-                pC->ewc.VppError = M4NO_ERROR;
-
-                M4OSA_TRACE2_0("E ++++ encode AU");
-
-                /**
-                * Encode the frame(rendering,filtering and writing will be done
-                 in encoder callbacks)*/
-                if( pC->Vstate == M4VSS3GPP_kEditVideoState_BEGIN_CUT )
-                    FrameMode = M4ENCODER_kIFrame;
-                else
-                    FrameMode = M4ENCODER_kNormalFrame;
-
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
-                pC->ewc.dInputVidCts, FrameMode);
-                /**
-                * Check if we had a VPP error... */
-                if( M4NO_ERROR != pC->ewc.VppError )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
-                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
-                        pC->ewc.VppError);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
-                    {
-#endif //M4VSS_SUPPORT_OMX_CODECS
-
-                        return pC->ewc.VppError;
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                    }
-
-#endif                                   //M4VSS_SUPPORT_OMX_CODECS
-                }
-                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
-                {
-                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
-                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
-                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
-                    }
-                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
-                    file size is reached
-                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
-                    is returned*/
-                    else if( M4WAR_WRITER_STOP_REQ == err )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
-                        return M4VSS3GPP_WAR_EDITING_DONE;
-                    }
-                    else
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
-                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Increment time by the encoding period (for begin cut, do not increment to not
-                loose P-frames) */
-                if( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate )
-                {
-                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
-                }
-            }
-            break;
-
-            /* _________________ */
-            /*|                 |*/
-            /*| TRANSITION MODE |*/
-            /*|_________________|*/
-
-        case M4VSS3GPP_kEditVideoState_TRANSITION:
-            {
-                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo TRANSITION");
-
-                /* Don't decode more than needed */
-                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
-                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus)) )
-                {
-                    /**
-                    * Decode the clip1 video up to the target time
-                    (will jump to the previous RAP if needed */
-                    if ((pC->pC1->pSettings->FileType ==
-                          M4VIDEOEDITING_kFileType_ARGB8888) &&
-                        (M4OSA_FALSE ==
-                         pC->pC1->pSettings->ClipProperties.bSetImageData)) {
-
-                        err = M4VSS3GPP_intSetYuv420PlaneFromARGB888(pC, pC->pC1);
-                        if( M4NO_ERROR != err ) {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                                M4VSS3GPP_intSetYuv420PlaneFromARGB888 err=%x", err);
-                            return err;
-                        }
-                    }
-                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1,
-                         (M4OSA_Int32)pC->ewc.dInputVidCts);
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                            M4VSS3GPP_intDecodeVideoUpToCts(C1) returns err=0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /* If the decoding is not completed, do one more step with time frozen */
-                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
-                    {
-                        return M4NO_ERROR;
-                    }
-                }
-
-                /* Don't decode more than needed */
-                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)
-                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus)) )
-                {
-                    /**
-                    * Decode the clip2 video up to the target time
-                        (will jump to the previous RAP if needed) */
-                    if ((pC->pC2->pSettings->FileType ==
-                          M4VIDEOEDITING_kFileType_ARGB8888) &&
-                        (M4OSA_FALSE ==
-                          pC->pC2->pSettings->ClipProperties.bSetImageData)) {
-
-                        err = M4VSS3GPP_intSetYuv420PlaneFromARGB888(pC, pC->pC2);
-                        if( M4NO_ERROR != err ) {
-                            M4OSA_TRACE1_1(
-                                "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                                M4VSS3GPP_intSetYuv420PlaneFromARGB888 err=%x", err);
-                            return err;
-                        }
-                    }
-
-                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC2,
-                         (M4OSA_Int32)pC->ewc.dInputVidCts);
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                            M4VSS3GPP_intDecodeVideoUpToCts(C2) returns err=0x%x",
-                            err);
-                        return err;
-                    }
-
-                    /* If the decoding is not completed, do one more step with time frozen */
-                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus )
-                    {
-                        return M4NO_ERROR;
-                    }
-                }
-
-                /**
-                * Reset the video pre-processing error before calling the encoder */
-                pC->ewc.VppError = M4NO_ERROR;
-
-                M4OSA_TRACE2_0("F **** blend AUs");
-
-                /**
-                * Encode the frame (rendering, filtering and writing will be done
-                in encoder callbacks */
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
-                    pC->ewc.dInputVidCts, M4ENCODER_kNormalFrame);
-
-                /**
-                * If encode returns a process frame error, it is likely to be a VPP error */
-                if( M4NO_ERROR != pC->ewc.VppError )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
-                        pC->ewc.VppError);
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
-                    {
-
-#endif //M4VSS_SUPPORT_OMX_CODECS
-
-                        return pC->ewc.VppError;
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-
-                    }
-
-#endif //M4VSS_SUPPORT_OMX_CODECS
-                }
-                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
-                {
-                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
-                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
-                    }
-
-                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
-                     file size is reached
-                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE is
-                     returned*/
-                    else if( M4WAR_WRITER_STOP_REQ == err )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
-                        return M4VSS3GPP_WAR_EDITING_DONE;
-                    }
-                    else
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
-                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
-                            err);
-                        return err;
-                    }
-                }
-
-                /**
-                * Increment time by the encoding period */
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
-            }
-            break;
-
-            /* ____________ */
-            /*|            |*/
-            /*| ERROR CASE |*/
-            /*|____________|*/
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intEditStepVideo: invalid internal state (0x%x),\
-                returning M4VSS3GPP_ERR_INTERNAL_STATE",
-                pC->Vstate);
-            return M4VSS3GPP_ERR_INTERNAL_STATE;
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCheckVideoMode()
- * @brief    Check which video process mode we must use, depending on the output CTS.
- * @param   pC    (IN/OUT) Internal edit context
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
-    M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    const M4OSA_Int32  t = (M4OSA_Int32)pC->ewc.dInputVidCts;
-    /**< Transition duration */
-    const M4OSA_Int32 TD = pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
-
-    M4OSA_Int32 iTmp;
-
-    const M4VSS3GPP_EditVideoState previousVstate = pC->Vstate;
-
-    /**
-    * Check if Clip1 is on its begin cut, or in an effect zone */
-    M4VSS3GPP_intCheckVideoEffects(pC, 1);
-
-    /**
-    * Check if we are in the transition with next clip */
-    if( ( TD > 0) && (( t - pC->pC1->iVoffset) >= (pC->pC1->iEndTime - TD)) )
-    {
-        /**
-        * We are in a transition */
-        pC->Vstate = M4VSS3GPP_kEditVideoState_TRANSITION;
-        pC->bTransitionEffect = M4OSA_TRUE;
-
-        /**
-        * Open second clip for transition, if not yet opened */
-        if( M4OSA_NULL == pC->pC2 )
-        {
-            pC->pC1->bGetYuvDataFromDecoder = M4OSA_TRUE;
-
-            err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
-                &pC->pClipList[pC->uiCurrentClip + 1]);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_editOpenClip returns 0x%x!",
-                    err);
-                return err;
-            }
-
-            /**
-            * Add current video output CTS to the clip offset
-            * (audio output CTS is not yet at the transition, so audio
-            *  offset can't be updated yet). */
-            // Decorrelate input and output encoding timestamp to handle encoder prefetch
-            pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
-
-            /**
-            * 2005-03-24: BugFix for audio-video synchro:
-            * Update transition duration due to the actual video transition beginning time.
-            * It will avoid desynchronization when doing the audio transition. */
-           // Decorrelate input and output encoding timestamp to handle encoder prefetch
-            iTmp = ((M4OSA_Int32)pC->ewc.dInputVidCts)\
-             - (pC->pC1->iEndTime - TD + pC->pC1->iVoffset);
-            if (iTmp < (M4OSA_Int32)pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration)
-            /**< Test in case of a very short transition */
-            {
-                pC->pTransitionList[pC->
-                    uiCurrentClip].uiTransitionDuration -= iTmp;
-
-                /**
-                * Don't forget to also correct the total duration used for the progress bar
-                * (it was computed with the original transition duration). */
-                pC->ewc.iOutputDuration += iTmp;
-            }
-            /**< No "else" here because it's hard predict the effect of 0 duration transition...*/
-        }
-
-        /**
-        * Check effects for clip2 */
-        M4VSS3GPP_intCheckVideoEffects(pC, 2);
-    }
-    else
-    {
-        /**
-        * We are not in a transition */
-        pC->bTransitionEffect = M4OSA_FALSE;
-
-        /* If there is an effect we go to decode/encode mode */
-        if((pC->nbActiveEffects > 0) || (pC->nbActiveEffects1 > 0) ||
-            (pC->pC1->pSettings->FileType ==
-             M4VIDEOEDITING_kFileType_ARGB8888) ||
-            (pC->pC1->pSettings->bTranscodingRequired == M4OSA_TRUE)) {
-            pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
-        }
-        /* We do a begin cut, except if already done (time is not progressing because we want
-        to catch all P-frames after the cut) */
-        else if( M4OSA_TRUE == pC->bClip1AtBeginCut )
-        {
-            if(pC->pC1->pSettings->ClipProperties.VideoStreamType == M4VIDEOEDITING_kH264) {
-                pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
-                pC->bEncodeTillEoF = M4OSA_TRUE;
-            } else if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
-                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) ) {
-                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
-            } else {
-                pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
-            }
-        }
-        /* Else we are in default copy/paste mode */
-        else
-        {
-            if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
-                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) )
-            {
-                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
-            }
-            else if( pC->bIsMMS == M4OSA_TRUE )
-            {
-                M4OSA_UInt32 currentBitrate;
-                M4OSA_ERR err = M4NO_ERROR;
-
-                /* Do we need to reencode the video to downgrade the bitrate or not ? */
-                /* Let's compute the cirrent bitrate of the current edited clip */
-                err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
-                    pC->pC1->pReaderContext,
-                    M4READER_kOptionID_Bitrate, &currentBitrate);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intCheckVideoMode:\
-                        Error when getting next bitrate of edited clip: 0x%x",
-                        err);
-                    return err;
-                }
-
-                /* Remove audio bitrate */
-                currentBitrate -= 12200;
-
-                /* Test if we go into copy/paste mode or into decode/encode mode */
-                if( currentBitrate > pC->uiMMSVideoBitrate )
-                {
-                    pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
-                }
-                else
-                {
-                    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
-                }
-            }
-            else if(!((pC->m_bClipExternalHasStarted == M4OSA_TRUE) &&
-                    (pC->Vstate == M4VSS3GPP_kEditVideoState_DECODE_ENCODE)) &&
-                    pC->bEncodeTillEoF == M4OSA_FALSE)
-            {
-                /**
-                 * Test if we go into copy/paste mode or into decode/encode mode
-                 * If an external effect has been applied on the current clip
-                 * then continue to be in decode/encode mode till end of
-                 * clip to avoid H.264 distortion.
-                 */
-                pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
-            }
-        }
-    }
-
-    /**
-    * Check if we create an encoder */
-    if( ( ( M4VSS3GPP_kEditVideoState_READ_WRITE == previousVstate)
-        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
-        == previousVstate)) /**< read mode */
-        && (( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate)
-        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == pC->Vstate)
-        || (M4VSS3GPP_kEditVideoState_TRANSITION
-        == pC->Vstate)) /**< encode mode */
-        && pC->bIsMMS == M4OSA_FALSE )
-    {
-        /**
-        * Create the encoder, if not created already*/
-        if (pC->ewc.encoderState == M4VSS3GPP_kNoEncoder) {
-            err = M4VSS3GPP_intCreateVideoEncoder(pC);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder \
-                     returns 0x%x!", err);
-                return err;
-            }
-        }
-    }
-    else if( pC->bIsMMS == M4OSA_TRUE && pC->ewc.pEncContext == M4OSA_NULL )
-    {
-        /**
-        * Create the encoder */
-        err = M4VSS3GPP_intCreateVideoEncoder(pC);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * When we go from filtering to read/write, we must act like a begin cut,
-    * because the last filtered image may be different than the original image. */
-    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
-        || (M4VSS3GPP_kEditVideoState_TRANSITION
-        == previousVstate)) /**< encode mode */
-        && (M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) /**< read mode */
-        && (pC->bEncodeTillEoF == M4OSA_FALSE) )
-    {
-        pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
-    }
-
-    /**
-    * Check if we destroy an encoder */
-    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
-        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
-        || (M4VSS3GPP_kEditVideoState_TRANSITION
-        == previousVstate)) /**< encode mode */
-        && (( M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate)
-        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
-        == pC->Vstate)) /**< read mode */
-        && pC->bIsMMS == M4OSA_FALSE )
-    {
-        /**
-        * Destroy the previously created encoder */
-        err = M4VSS3GPP_intDestroyVideoEncoder(pC);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intDestroyVideoEncoder returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4VSS3GPP_intCheckVideoMode: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intStartAU()
- * @brief    StartAU writer-like interface used for the VSS 3GPP only
- * @note
- * @param    pContext: (IN) It is the VSS 3GPP context in our case
- * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
- * @param    pAU:      (IN/OUT) Access Unit to be prepared.
- * @return    M4NO_ERROR: there is no error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intStartAU( M4WRITER_Context pContext,
-                               M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
-{
-    M4OSA_ERR err;
-    M4OSA_UInt32 uiMaxAuSize;
-
-    /**
-    * Given context is actually the VSS3GPP context */
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-
-    /**
-    * Get the output AU to write into */
-    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intStartAU: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intStartAU: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intProcessAU()
- * @brief    ProcessAU writer-like interface used for the VSS 3GPP only
- * @note
- * @param    pContext: (IN) It is the VSS 3GPP context in our case
- * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
- * @param    pAU:      (IN/OUT) Access Unit to be written
- * @return    M4NO_ERROR: there is no error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intProcessAU( M4WRITER_Context pContext,
-                                 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
-{
-    M4OSA_ERR err;
-
-    /**
-    * Given context is actually the VSS3GPP context */
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-
-    /**
-    * Fix the encoded AU time */
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    pC->ewc.dOutputVidCts = pAU->CTS;
-    /**
-    * Update time info for the Counter Time System to be equal to the bit-stream time */
-    M4VSS3GPP_intUpdateTimeInfo(pC, pAU);
-
-    /**
-    * Write the AU */
-    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
-        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intProcessAU: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
-            err);
-        return err;
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intProcessAU: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intVPP()
- * @brief    We implement our own VideoPreProcessing function
- * @note    It is called by the video encoder
- * @param    pContext    (IN) VPP context, which actually is the VSS 3GPP context in our case
- * @param    pPlaneIn    (IN)
- * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the output
- *                                  YUV420 image
- * @return    M4NO_ERROR:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
-                           M4VIFI_ImagePlane *pPlaneOut )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4_MediaTime ts;
-    M4VIFI_ImagePlane *pTmp = M4OSA_NULL;
-    M4VIFI_ImagePlane *pLastDecodedFrame = M4OSA_NULL ;
-    M4VIFI_ImagePlane *pDecoderRenderFrame = M4OSA_NULL;
-    M4VIFI_ImagePlane pTemp1[3],pTemp2[3];
-    M4VIFI_ImagePlane pTempPlaneClip1[3],pTempPlaneClip2[3];
-    M4OSA_UInt32  i = 0, yuvFrameWidth = 0, yuvFrameHeight = 0;
-    M4OSA_Bool bSkipFrameEffect = M4OSA_FALSE;
-    /**
-    * VPP context is actually the VSS3GPP context */
-    M4VSS3GPP_InternalEditContext *pC =
-        (M4VSS3GPP_InternalEditContext *)pContext;
-
-    memset((void *)pTemp1, 0, 3*sizeof(M4VIFI_ImagePlane));
-    memset((void *)pTemp2, 0, 3*sizeof(M4VIFI_ImagePlane));
-    memset((void *)pTempPlaneClip1, 0, 3*sizeof(M4VIFI_ImagePlane));
-    memset((void *)pTempPlaneClip2, 0, 3*sizeof(M4VIFI_ImagePlane));
-
-    /**
-    * Reset VPP error remembered in context */
-    pC->ewc.VppError = M4NO_ERROR;
-
-    /**
-    * At the end of the editing, we may be called when no more clip is loaded.
-    * (because to close the encoder properly it must be stepped one or twice...) */
-    if( M4OSA_NULL == pC->pC1 )
-    {
-        /**
-        * We must fill the input of the encoder with a dummy image, because
-        * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
-        memset((void *)pPlaneOut[0].pac_data,0,
-            pPlaneOut[0].u_stride * pPlaneOut[0].u_height);
-        memset((void *)pPlaneOut[1].pac_data,0,
-            pPlaneOut[1].u_stride * pPlaneOut[1].u_height);
-        memset((void *)pPlaneOut[2].pac_data,0,
-            pPlaneOut[2].u_stride * pPlaneOut[2].u_height);
-
-        M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR (abort)");
-        return M4NO_ERROR;
-    }
-
-    /**
-    **************** Transition case ****************/
-    if( M4OSA_TRUE == pC->bTransitionEffect )
-    {
-
-        err = M4VSS3GPP_intAllocateYUV420(pTemp1, pC->ewc.uiVideoWidth,
-                                          pC->ewc.uiVideoHeight);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(1) returns 0x%x, \
-                           returning M4NO_ERROR", err);
-            pC->ewc.VppError = err;
-            return M4NO_ERROR; /**< Return no error to the encoder core
-                               (else it may leak in some situations...) */
-        }
-
-        err = M4VSS3GPP_intAllocateYUV420(pTemp2, pC->ewc.uiVideoWidth,
-                                          pC->ewc.uiVideoHeight);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(2) returns 0x%x, \
-                           returning M4NO_ERROR", err);
-            pC->ewc.VppError = err;
-            return M4NO_ERROR; /**< Return no error to the encoder core
-                              (else it may leak in some situations...) */
-        }
-
-        err = M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth,
-            pC->ewc.uiVideoHeight);
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
-                returning M4NO_ERROR",
-                err);
-            pC->ewc.VppError = err;
-            return
-                M4NO_ERROR; /**< Return no error to the encoder core
-                            (else it may leak in some situations...) */
-        }
-
-        err = M4VSS3GPP_intAllocateYUV420(pC->yuv2, pC->ewc.uiVideoWidth,
-            pC->ewc.uiVideoHeight);
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
-                returning M4NO_ERROR",
-                err);
-            pC->ewc.VppError = err;
-            return
-                M4NO_ERROR; /**< Return no error to the encoder core
-                            (else it may leak in some situations...) */
-        }
-
-        err = M4VSS3GPP_intAllocateYUV420(pC->yuv3, pC->ewc.uiVideoWidth,
-            pC->ewc.uiVideoHeight);
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
-                returning M4NO_ERROR",
-                err);
-            pC->ewc.VppError = err;
-            return
-                M4NO_ERROR; /**< Return no error to the encoder core
-                            (else it may leak in some situations...) */
-        }
-
-        /**
-        * Compute the time in the clip1 base: ts = to - Offset */
-        // Decorrelate input and output encoding timestamp to handle encoder prefetch
-        ts = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
-
-        /**
-        * Render Clip1 */
-        if( pC->pC1->isRenderDup == M4OSA_FALSE )
-        {
-            err = M4VSS3GPP_intRenderFrameWithEffect(pC, pC->pC1, ts, M4OSA_TRUE,
-                                                pTempPlaneClip1, pTemp1,
-                                                pPlaneOut);
-            if ((M4NO_ERROR != err) &&
-                 (M4WAR_VIDEORENDERER_NO_NEW_FRAME != err)) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                    M4VSS3GPP_intRenderFrameWithEffect returns 0x%x", err);
-                pC->ewc.VppError = err;
-                /** Return no error to the encoder core
-                  * else it may leak in some situations.*/
-                return M4NO_ERROR;
-            }
-        }
-        if ((pC->pC1->isRenderDup == M4OSA_TRUE) ||
-             (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
-            pTmp = pC->yuv1;
-            if (pC->pC1->lastDecodedPlane != M4OSA_NULL) {
-                /* Copy last decoded plane to output plane */
-                memcpy((void *)pTmp[0].pac_data,
-                    (void *)pC->pC1->lastDecodedPlane[0].pac_data,
-                    (pTmp[0].u_height * pTmp[0].u_width));
-                memcpy((void *)pTmp[1].pac_data,
-                    (void *)pC->pC1->lastDecodedPlane[1].pac_data,
-                    (pTmp[1].u_height * pTmp[1].u_width));
-                memcpy((void *)pTmp[2].pac_data,
-                    (void *)pC->pC1->lastDecodedPlane[2].pac_data,
-                    (pTmp[2].u_height * pTmp[2].u_width));
-            } else {
-                err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
-                M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
-                   err, __FILE__, __LINE__);
-                pC->ewc.VppError = err;
-                return M4NO_ERROR;
-            }
-            pC->pC1->lastDecodedPlane = pTmp;
-        }
-
-        /**
-        * Compute the time in the clip2 base: ts = to - Offset */
-        // Decorrelate input and output encoding timestamp to handle encoder prefetch
-        ts = pC->ewc.dInputVidCts - pC->pC2->iVoffset;
-        /**
-        * Render Clip2 */
-        if( pC->pC2->isRenderDup == M4OSA_FALSE )
-        {
-
-            err = M4VSS3GPP_intRenderFrameWithEffect(pC, pC->pC2, ts, M4OSA_FALSE,
-                                                pTempPlaneClip2, pTemp2,
-                                                pPlaneOut);
-            if ((M4NO_ERROR != err) &&
-                 (M4WAR_VIDEORENDERER_NO_NEW_FRAME != err)) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                    M4VSS3GPP_intRenderFrameWithEffect returns 0x%x", err);
-                pC->ewc.VppError = err;
-                /** Return no error to the encoder core
-                  * else it may leak in some situations.*/
-                return M4NO_ERROR;
-            }
-        }
-        if ((pC->pC2->isRenderDup == M4OSA_TRUE) ||
-             (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
-            pTmp = pC->yuv2;
-            if (pC->pC2->lastDecodedPlane != M4OSA_NULL) {
-                /* Copy last decoded plane to output plane */
-                memcpy((void *)pTmp[0].pac_data,
-                    (void *)pC->pC2->lastDecodedPlane[0].pac_data,
-                    (pTmp[0].u_height * pTmp[0].u_width));
-                memcpy((void *)pTmp[1].pac_data,
-                    (void *)pC->pC2->lastDecodedPlane[1].pac_data,
-                    (pTmp[1].u_height * pTmp[1].u_width));
-                memcpy((void *)pTmp[2].pac_data,
-                    (void *)pC->pC2->lastDecodedPlane[2].pac_data,
-                    (pTmp[2].u_height * pTmp[2].u_width));
-            } else {
-                err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
-                M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
-                   err, __FILE__, __LINE__);
-                pC->ewc.VppError = err;
-                return M4NO_ERROR;
-            }
-            pC->pC2->lastDecodedPlane = pTmp;
-        }
-
-
-        pTmp = pPlaneOut;
-        err = M4VSS3GPP_intVideoTransition(pC, pTmp);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intVPP: M4VSS3GPP_intVideoTransition returns 0x%x,\
-                returning M4NO_ERROR",
-                err);
-            pC->ewc.VppError = err;
-            return  M4NO_ERROR; /**< Return no error to the encoder core
-                                (else it may leak in some situations...) */
-        }
-        for (i=0; i < 3; i++)
-        {
-            if(pTempPlaneClip2[i].pac_data != M4OSA_NULL) {
-                free(pTempPlaneClip2[i].pac_data);
-                pTempPlaneClip2[i].pac_data = M4OSA_NULL;
-            }
-
-            if(pTempPlaneClip1[i].pac_data != M4OSA_NULL) {
-                free(pTempPlaneClip1[i].pac_data);
-                pTempPlaneClip1[i].pac_data = M4OSA_NULL;
-            }
-
-            if (pTemp2[i].pac_data != M4OSA_NULL) {
-                free(pTemp2[i].pac_data);
-                pTemp2[i].pac_data = M4OSA_NULL;
-            }
-
-            if (pTemp1[i].pac_data != M4OSA_NULL) {
-                free(pTemp1[i].pac_data);
-                pTemp1[i].pac_data = M4OSA_NULL;
-            }
-        }
-    }
-    /**
-    **************** No Transition case ****************/
-    else
-    {
-        M4OSA_TRACE3_0("M4VSS3GPP_intVPP: NO transition case");
-        /**
-        * Compute the time in the clip base: ts = to - Offset */
-        ts = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
-        pC->bIssecondClip = M4OSA_FALSE;
-        /**
-        * Render */
-        if (pC->pC1->isRenderDup == M4OSA_FALSE) {
-            M4OSA_TRACE3_0("M4VSS3GPP_intVPP: renderdup false");
-            /**
-            *   Check if resizing is needed */
-            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
-                if ((pC->pC1->pSettings->FileType ==
-                            M4VIDEOEDITING_kFileType_ARGB8888) &&
-                        (pC->nbActiveEffects == 0) &&
-                        (pC->pC1->bGetYuvDataFromDecoder == M4OSA_FALSE)) {
-                    err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                              pC->pC1->pViDecCtxt,
-                              M4DECODER_kOptionID_EnableYuvWithEffect,
-                              (M4OSA_DataOption)M4OSA_TRUE);
-                    if (M4NO_ERROR == err ) {
-                        err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                                  pC->pC1->pViDecCtxt, &ts,
-                                  pPlaneOut, M4OSA_TRUE);
-                    }
-                } else {
-                    if (pC->pC1->pSettings->FileType ==
-                            M4VIDEOEDITING_kFileType_ARGB8888) {
-                        err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                                  pC->pC1->pViDecCtxt,
-                                  M4DECODER_kOptionID_EnableYuvWithEffect,
-                                  (M4OSA_DataOption)M4OSA_FALSE);
-                    }
-                    if (M4NO_ERROR == err) {
-                        err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                                  pC->pC1->pViDecCtxt, &ts,
-                                  pC->pC1->m_pPreResizeFrame, M4OSA_TRUE);
-                    }
-                }
-                if (M4NO_ERROR != err) {
-                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                        m_pFctRender() returns error 0x%x", err);
-                    pC->ewc.VppError = err;
-                    return M4NO_ERROR;
-                }
-                if (pC->pC1->pSettings->FileType !=
-                        M4VIDEOEDITING_kFileType_ARGB8888) {
-                    if (0 != pC->pC1->pSettings->ClipProperties.videoRotationDegrees) {
-                        // Save width and height of un-rotated frame
-                        yuvFrameWidth = pC->pC1->m_pPreResizeFrame[0].u_width;
-                        yuvFrameHeight = pC->pC1->m_pPreResizeFrame[0].u_height;
-                        err = M4VSS3GPP_intRotateVideo(pC->pC1->m_pPreResizeFrame,
-                                pC->pC1->pSettings->ClipProperties.videoRotationDegrees);
-                        if (M4NO_ERROR != err) {
-                            M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                                rotateVideo() returns error 0x%x", err);
-                            pC->ewc.VppError = err;
-                            return M4NO_ERROR;
-                        }
-                    }
-                }
-
-                if (pC->nbActiveEffects > 0) {
-                    pC->pC1->bGetYuvDataFromDecoder = M4OSA_TRUE;
-                    /**
-                    * If we do modify the image, we need an intermediate
-                    * image plane */
-                    err = M4VSS3GPP_intAllocateYUV420(pTemp1,
-                            pC->pC1->m_pPreResizeFrame[0].u_width,
-                            pC->pC1->m_pPreResizeFrame[0].u_height);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intAllocateYUV420 error 0x%x", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    /* If video frame need to be resized, then apply the overlay after
-                     * the frame was rendered with rendering mode.
-                     * Here skip the framing(overlay) effect when applying video Effect. */
-                    bSkipFrameEffect = M4OSA_TRUE;
-                    err = M4VSS3GPP_intApplyVideoEffect(pC,
-                            pC->pC1->m_pPreResizeFrame, pTemp1, bSkipFrameEffect);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intApplyVideoEffect() error 0x%x", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pDecoderRenderFrame= pTemp1;
-
-                } else {
-                    pDecoderRenderFrame = pC->pC1->m_pPreResizeFrame;
-                }
-                /* Prepare overlay temporary buffer if overlay exist */
-                if (pC->bClip1ActiveFramingEffect) {
-                    err = M4VSS3GPP_intAllocateYUV420(pTemp2,
-                        pPlaneOut[0].u_width, pPlaneOut[0].u_height);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 \
-                            returns 0x%x, returning M4NO_ERROR", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pTmp = pTemp2;
-                } else {
-                    pTmp = pPlaneOut;
-                }
-
-                /* Do rendering mode. */
-                if ((pC->pC1->bGetYuvDataFromDecoder == M4OSA_TRUE) ||
-                    (pC->pC1->pSettings->FileType !=
-                        M4VIDEOEDITING_kFileType_ARGB8888)) {
-
-                    err = M4VSS3GPP_intApplyRenderingMode(pC,
-                              pC->pC1->pSettings->xVSS.MediaRendering,
-                              pDecoderRenderFrame, pTmp);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intApplyRenderingMode) error 0x%x ", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                }
-
-                /* Apply overlay if overlay is exist */
-                if (pC->bClip1ActiveFramingEffect) {
-                    pDecoderRenderFrame = pTmp;
-                    pTmp = pPlaneOut;
-                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
-                        pDecoderRenderFrame, pTmp);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intApplyVideoOverlay) error 0x%x ", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                }
-
-                if ((pC->pC1->pSettings->FileType ==
-                        M4VIDEOEDITING_kFileType_ARGB8888) &&
-                    (pC->nbActiveEffects == 0) &&
-                    (pC->pC1->bGetYuvDataFromDecoder == M4OSA_TRUE)) {
-
-                    err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                              pC->pC1->pViDecCtxt,
-                              M4DECODER_kOptionID_YuvWithEffectNonContiguous,
-                              (M4OSA_DataOption)pTmp);
-                    if (M4NO_ERROR != err) {
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pC->pC1->bGetYuvDataFromDecoder = M4OSA_FALSE;
-                }
-
-                // Reset original width and height for resize frame plane
-                if (0 != pC->pC1->pSettings->ClipProperties.videoRotationDegrees &&
-                    180 != pC->pC1->pSettings->ClipProperties.videoRotationDegrees) {
-
-                    M4VSS3GPP_intSetYUV420Plane(pC->pC1->m_pPreResizeFrame,
-                                                yuvFrameWidth, yuvFrameHeight);
-                }
-            }
-            else
-            {
-                M4OSA_TRACE3_0("M4VSS3GPP_intVPP: NO resize required");
-                if (pC->nbActiveEffects > 0) {
-                    /** If we do modify the image, we need an
-                     * intermediate image plane */
-                    err = M4VSS3GPP_intAllocateYUV420(pTemp1,
-                              pC->ewc.uiVideoWidth,
-                              pC->ewc.uiVideoHeight);
-                    if (M4NO_ERROR != err) {
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pDecoderRenderFrame = pTemp1;
-                }
-                else {
-                    pDecoderRenderFrame = pPlaneOut;
-                }
-
-                pTmp = pPlaneOut;
-                err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                          pC->pC1->pViDecCtxt, &ts,
-                          pDecoderRenderFrame, M4OSA_TRUE);
-                if (M4NO_ERROR != err) {
-                    pC->ewc.VppError = err;
-                    return M4NO_ERROR;
-                }
-
-                if (pC->nbActiveEffects > 0) {
-                    /* Here we do not skip the overlay effect since
-                     * overlay and video frame are both of same resolution */
-                    bSkipFrameEffect = M4OSA_FALSE;
-                    err = M4VSS3GPP_intApplyVideoEffect(pC,
-                              pDecoderRenderFrame,pPlaneOut,bSkipFrameEffect);
-                    }
-                    if (M4NO_ERROR != err) {
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-            }
-            pC->pC1->lastDecodedPlane = pTmp;
-            pC->pC1->iVideoRenderCts = (M4OSA_Int32)ts;
-
-        } else {
-            M4OSA_TRACE3_0("M4VSS3GPP_intVPP: renderdup true");
-
-            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
-                /**
-                * Copy last decoded plane to output plane */
-                if (pC->pC1->lastDecodedPlane != M4OSA_NULL) {
-
-                    memcpy((void *)pC->pC1->m_pPreResizeFrame[0].pac_data,
-                        (void *)pC->pC1->lastDecodedPlane[0].pac_data,
-                        (pC->pC1->m_pPreResizeFrame[0].u_height * \
-                         pC->pC1->m_pPreResizeFrame[0].u_width));
-
-                    memcpy((void *)pC->pC1->m_pPreResizeFrame[1].pac_data,
-                        (void *)pC->pC1->lastDecodedPlane[1].pac_data,
-                        (pC->pC1->m_pPreResizeFrame[1].u_height * \
-                         pC->pC1->m_pPreResizeFrame[1].u_width));
-
-                    memcpy((void *)pC->pC1->m_pPreResizeFrame[2].pac_data,
-                        (void *)pC->pC1->lastDecodedPlane[2].pac_data,
-                        (pC->pC1->m_pPreResizeFrame[2].u_height * \
-                         pC->pC1->m_pPreResizeFrame[2].u_width));
-                } else {
-                    err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
-                    M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
-                        err, __FILE__, __LINE__);
-                    pC->ewc.VppError = err;
-                    return M4NO_ERROR;
-                }
-
-                if(pC->nbActiveEffects > 0) {
-                    /**
-                    * If we do modify the image, we need an
-                    * intermediate image plane */
-                    err = M4VSS3GPP_intAllocateYUV420(pTemp1,
-                              pC->pC1->m_pPreResizeFrame[0].u_width,
-                              pC->pC1->m_pPreResizeFrame[0].u_height);
-                    if (M4NO_ERROR != err) {
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    /* If video frame need to be resized, then apply the overlay after
-                     * the frame was rendered with rendering mode.
-                     * Here skip the framing(overlay) effect when applying video Effect. */
-                    bSkipFrameEffect = M4OSA_TRUE;
-                    err = M4VSS3GPP_intApplyVideoEffect(pC,
-                              pC->pC1->m_pPreResizeFrame,pTemp1, bSkipFrameEffect);
-                    if (M4NO_ERROR != err) {
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pDecoderRenderFrame= pTemp1;
-                } else {
-                    pDecoderRenderFrame = pC->pC1->m_pPreResizeFrame;
-                }
-                /* Prepare overlay temporary buffer if overlay exist */
-                if (pC->bClip1ActiveFramingEffect) {
-                    err = M4VSS3GPP_intAllocateYUV420(
-                        pTemp2, pC->ewc.uiVideoWidth, pC->ewc.uiVideoHeight);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 \
-                            returns 0x%x, returning M4NO_ERROR", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pTmp = pTemp2;
-                } else {
-                    pTmp = pPlaneOut;
-                }
-                /* Do rendering mode */
-                err = M4VSS3GPP_intApplyRenderingMode(pC,
-                          pC->pC1->pSettings->xVSS.MediaRendering,
-                          pDecoderRenderFrame, pTmp);
-                if (M4NO_ERROR != err) {
-                    pC->ewc.VppError = err;
-                    return M4NO_ERROR;
-                }
-                /* Apply overlay if overlay is exist */
-                pTmp = pPlaneOut;
-                if (pC->bClip1ActiveFramingEffect) {
-                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
-                        pTemp2, pTmp);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intApplyRenderingMode) error 0x%x ", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                }
-            } else {
-
-                err = M4VSS3GPP_intAllocateYUV420(pTemp1,
-                          pC->ewc.uiVideoWidth,
-                          pC->ewc.uiVideoHeight);
-                if (M4NO_ERROR != err) {
-                    pC->ewc.VppError = err;
-                    return M4NO_ERROR;
-                }
-                /**
-                 * Copy last decoded plane to output plane */
-                if (pC->pC1->lastDecodedPlane != M4OSA_NULL &&
-                    pLastDecodedFrame != M4OSA_NULL) {
-                    memcpy((void *)pLastDecodedFrame[0].pac_data,
-                        (void *)pC->pC1->lastDecodedPlane[0].pac_data,
-                        (pLastDecodedFrame[0].u_height * pLastDecodedFrame[0].u_width));
-
-                    memcpy((void *)pLastDecodedFrame[1].pac_data,
-                        (void *)pC->pC1->lastDecodedPlane[1].pac_data,
-                        (pLastDecodedFrame[1].u_height * pLastDecodedFrame[1].u_width));
-
-                    memcpy((void *)pLastDecodedFrame[2].pac_data,
-                        (void *)pC->pC1->lastDecodedPlane[2].pac_data,
-                        (pLastDecodedFrame[2].u_height * pLastDecodedFrame[2].u_width));
-                } else {
-                    err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
-                    M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
-                        err, __FILE__, __LINE__);
-                    pC->ewc.VppError = err;
-                    return M4NO_ERROR;
-                }
-
-                pTmp = pPlaneOut;
-                /**
-                * Check if there is a effect */
-                if(pC->nbActiveEffects > 0) {
-                    /* Here we do not skip the overlay effect since
-                     * overlay and video are both of same resolution */
-                    bSkipFrameEffect = M4OSA_FALSE;
-                    err = M4VSS3GPP_intApplyVideoEffect(pC,
-                              pLastDecodedFrame, pTmp,bSkipFrameEffect);
-                    if (M4NO_ERROR != err) {
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                }
-            }
-            pC->pC1->lastDecodedPlane = pTmp;
-        }
-
-        M4OSA_TRACE3_1("M4VSS3GPP_intVPP: Rendered at CTS %.3f", ts);
-
-        for (i=0; i<3; i++) {
-            if (pTemp1[i].pac_data != M4OSA_NULL) {
-                free(pTemp1[i].pac_data);
-                pTemp1[i].pac_data = M4OSA_NULL;
-            }
-        }
-        for (i=0; i<3; i++) {
-            if (pTemp2[i].pac_data != M4OSA_NULL) {
-                free(pTemp2[i].pac_data);
-                pTemp2[i].pac_data = M4OSA_NULL;
-            }
-        }
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intApplyVideoOverlay()
- * @brief    Apply video overlay from pPlaneIn to pPlaneOut
- * @param    pC               (IN/OUT) Internal edit context
- * @param    pInputPlanes    (IN) Input raw YUV420 image
- * @param    pOutputPlanes   (IN/OUT) Output raw YUV420 image
- * @return   M4NO_ERROR:    No error
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intApplyVideoOverlay (M4VSS3GPP_InternalEditContext *pC,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {
-
-    M4VSS3GPP_ClipContext *pClip;
-    M4VSS3GPP_EffectSettings *pFx;
-    M4VSS3GPP_ExternalProgress extProgress;
-    M4OSA_Double VideoEffectTime;
-    M4OSA_Double PercentageDone;
-    M4OSA_UInt8 NumActiveEffects =0;
-    M4OSA_UInt32 Cts = 0;
-    M4OSA_Int32 nextEffectTime;
-    M4OSA_Int32 tmp;
-    M4OSA_UInt8 i;
-    M4OSA_ERR err;
-
-    pClip = pC->pC1;
-    if (pC->bIssecondClip == M4OSA_TRUE) {
-        NumActiveEffects = pC->nbActiveEffects1;
-    } else {
-        NumActiveEffects = pC->nbActiveEffects;
-    }
-    for (i=0; i<NumActiveEffects; i++) {
-        if (pC->bIssecondClip == M4OSA_TRUE) {
-            pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]);
-            /* Compute how far from the beginning of the effect we are, in clip-base time. */
-            // Decorrelate input and output encoding timestamp to handle encoder prefetch
-            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) +
-                pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration - pFx->uiStartTime;
-        } else {
-            pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]);
-            /* Compute how far from the beginning of the effect we are, in clip-base time. */
-            // Decorrelate input and output encoding timestamp to handle encoder prefetch
-            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime;
-        }
-        /* Do the framing(overlay) effect only,
-         * skip other color effect which had been applied */
-        if (pFx->xVSS.pFramingBuffer == M4OSA_NULL) {
-            continue;
-        }
-
-        /* To calculate %, substract timeIncrement because effect should finish
-         * on the last frame which is presented from CTS = eof-timeIncrement till CTS = eof */
-        PercentageDone = VideoEffectTime / ((M4OSA_Float)pFx->uiDuration);
-
-        if (PercentageDone < 0.0) {
-            PercentageDone = 0.0;
-        }
-        if (PercentageDone > 1.0) {
-            PercentageDone = 1.0;
-        }
-        /**
-        * Compute where we are in the effect (scale is 0->1000) */
-        tmp = (M4OSA_Int32)(PercentageDone * 1000);
-
-        /**
-        * Set the progress info provided to the external function */
-        extProgress.uiProgress = (M4OSA_UInt32)tmp;
-        // Decorrelate input and output encoding timestamp to handle encoder prefetch
-        extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
-        extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset;
-        extProgress.bIsLast = M4OSA_FALSE;
-        // Decorrelate input and output encoding timestamp to handle encoder prefetch
-        nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \
-            + pC->dOutputFrameDuration);
-        if (nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) {
-            extProgress.bIsLast = M4OSA_TRUE;
-        }
-        err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt,
-            pPlaneIn, pPlaneOut, &extProgress,
-            pFx->VideoEffectType - M4VSS3GPP_kVideoEffectType_External);
-
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intApplyVideoOverlay: \
-                External video effect function returns 0x%x!",
-                err);
-            return err;
-        }
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoOverlay: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intApplyVideoEffect()
- * @brief    Apply video effect from pPlaneIn to pPlaneOut
- * @param   pC                (IN/OUT) Internal edit context
- * @param   uiClip1orClip2    (IN/OUT) 1 for first clip, 2 for second clip
- * @param    pInputPlanes    (IN) Input raw YUV420 image
- * @param    pOutputPlanes    (IN/OUT) Output raw YUV420 image
- * @param    bSkipFramingEffect (IN) skip framing effect flag
- * @return    M4NO_ERROR:                        No error
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intApplyVideoEffect (M4VSS3GPP_InternalEditContext *pC,
-    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut,
-    M4OSA_Bool bSkipFramingEffect) {
-
-    M4OSA_ERR err;
-
-    M4VSS3GPP_ClipContext *pClip;
-    M4VSS3GPP_EffectSettings *pFx;
-    M4VSS3GPP_ExternalProgress extProgress;
-
-    M4OSA_Double VideoEffectTime;
-    M4OSA_Double PercentageDone;
-    M4OSA_Int32 tmp;
-
-    M4VIFI_ImagePlane *pPlaneTempIn;
-    M4VIFI_ImagePlane *pPlaneTempOut;
-    M4VIFI_ImagePlane  pTempYuvPlane[3];
-    M4OSA_UInt8 i;
-    M4OSA_UInt8 NumActiveEffects =0;
-
-
-    pClip = pC->pC1;
-    if (pC->bIssecondClip == M4OSA_TRUE)
-    {
-        NumActiveEffects = pC->nbActiveEffects1;
-    }
-    else
-    {
-        NumActiveEffects = pC->nbActiveEffects;
-    }
-
-    memset((void *)pTempYuvPlane, 0, 3*sizeof(M4VIFI_ImagePlane));
-
-    /**
-    * Allocate temporary plane if needed RC */
-    if (NumActiveEffects > 1) {
-        err = M4VSS3GPP_intAllocateYUV420(pTempYuvPlane, pPlaneOut->u_width,
-                  pPlaneOut->u_height);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intApplyVideoEffect: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
-                returning M4NO_ERROR",
-                err);
-            pC->ewc.VppError = err;
-            return
-                M4NO_ERROR; /**< Return no error to the encoder core
-                            (else it may leak in some situations...) */
-        }
-    }
-
-    if (NumActiveEffects  % 2 == 0)
-    {
-        pPlaneTempIn = pPlaneIn;
-        pPlaneTempOut = pTempYuvPlane;
-    }
-    else
-    {
-        pPlaneTempIn = pPlaneIn;
-        pPlaneTempOut = pPlaneOut;
-    }
-
-    for (i=0; i<NumActiveEffects; i++)
-    {
-        if (pC->bIssecondClip == M4OSA_TRUE)
-        {
-
-
-            pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]);
-            /* Compute how far from the beginning of the effect we are, in clip-base time. */
-            // Decorrelate input and output encoding timestamp to handle encoder prefetch
-            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) +
-                              pC->pTransitionList[pC->uiCurrentClip].
-                              uiTransitionDuration- pFx->uiStartTime;
-        }
-        else
-        {
-            pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]);
-            /* Compute how far from the beginning of the effect we are, in clip-base time. */
-            // Decorrelate input and output encoding timestamp to handle encoder prefetch
-            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime;
-        }
-
-
-
-        /* To calculate %, substract timeIncrement because effect should finish on the last frame*/
-        /* which is presented from CTS = eof-timeIncrement till CTS = eof */
-        PercentageDone = VideoEffectTime
-            / ((M4OSA_Float)pFx->uiDuration/*- pC->dOutputFrameDuration*/);
-
-        if( PercentageDone < 0.0 )
-            PercentageDone = 0.0;
-
-        if( PercentageDone > 1.0 )
-            PercentageDone = 1.0;
-
-        switch( pFx->VideoEffectType )
-        {
-            case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
-                /**
-                * Compute where we are in the effect (scale is 0->1024). */
-                tmp = (M4OSA_Int32)(PercentageDone * 1024);
-
-                /**
-                * Apply the darkening effect */
-                err =
-                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
-                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intApplyVideoEffect:\
-                        M4VFL_modifyLumaWithScale returns error 0x%x,\
-                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
-                        err);
-                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
-                }
-                break;
-
-            case M4VSS3GPP_kVideoEffectType_FadeToBlack:
-                /**
-                * Compute where we are in the effect (scale is 0->1024) */
-                tmp = (M4OSA_Int32)(( 1.0 - PercentageDone) * 1024);
-
-                /**
-                * Apply the darkening effect */
-                err =
-                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
-                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intApplyVideoEffect:\
-                        M4VFL_modifyLumaWithScale returns error 0x%x,\
-                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
-                        err);
-                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
-                }
-                break;
-
-            default:
-                if( pFx->VideoEffectType
-                    >= M4VSS3GPP_kVideoEffectType_External )
-                {
-                    M4OSA_UInt32 Cts = 0;
-                    M4OSA_Int32 nextEffectTime;
-
-                    /**
-                    * Compute where we are in the effect (scale is 0->1000) */
-                    tmp = (M4OSA_Int32)(PercentageDone * 1000);
-
-                    /**
-                    * Set the progress info provided to the external function */
-                    extProgress.uiProgress = (M4OSA_UInt32)tmp;
-                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                    extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
-                    extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset;
-                    extProgress.bIsLast = M4OSA_FALSE;
-                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                    nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \
-                        + pC->dOutputFrameDuration);
-                    if(nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration))
-                    {
-                        extProgress.bIsLast = M4OSA_TRUE;
-                    }
-                    /* Here skip the framing effect,
-                     * do the framing effect after apply rendering mode */
-                    if ((pFx->xVSS.pFramingBuffer != M4OSA_NULL) &&
-                        bSkipFramingEffect == M4OSA_TRUE) {
-                        memcpy(pPlaneTempOut[0].pac_data, pPlaneTempIn[0].pac_data,
-                            pPlaneTempIn[0].u_height * pPlaneTempIn[0].u_width);
-                        memcpy(pPlaneTempOut[1].pac_data, pPlaneTempIn[1].pac_data,
-                            pPlaneTempIn[1].u_height * pPlaneTempIn[1].u_width);
-                        memcpy(pPlaneTempOut[2].pac_data, pPlaneTempIn[2].pac_data,
-                            pPlaneTempIn[2].u_height * pPlaneTempIn[2].u_width);
-
-                    } else {
-                        err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt,
-                            pPlaneTempIn, pPlaneTempOut, &extProgress,
-                            pFx->VideoEffectType
-                            - M4VSS3GPP_kVideoEffectType_External);
-                    }
-                    if( M4NO_ERROR != err )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_intApplyVideoEffect: \
-                            External video effect function returns 0x%x!",
-                            err);
-                        return err;
-                    }
-                    break;
-                }
-                else
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intApplyVideoEffect: unknown effect type (0x%x),\
-                        returning M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE",
-                        pFx->VideoEffectType);
-                    return M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE;
-                }
-        }
-        /**
-        * RC Updates pTempPlaneIn and pTempPlaneOut depending on current effect */
-        if (((i % 2 == 0) && (NumActiveEffects  % 2 == 0))
-            || ((i % 2 != 0) && (NumActiveEffects % 2 != 0)))
-        {
-            pPlaneTempIn = pTempYuvPlane;
-            pPlaneTempOut = pPlaneOut;
-        }
-        else
-        {
-            pPlaneTempIn = pPlaneOut;
-            pPlaneTempOut = pTempYuvPlane;
-        }
-    }
-
-    for(i=0; i<3; i++) {
-        if(pTempYuvPlane[i].pac_data != M4OSA_NULL) {
-            free(pTempYuvPlane[i].pac_data);
-            pTempYuvPlane[i].pac_data = M4OSA_NULL;
-        }
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoEffect: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intVideoTransition()
- * @brief    Apply video transition effect pC1+pC2->pPlaneOut
- * @param   pC                (IN/OUT) Internal edit context
- * @param    pOutputPlanes    (IN/OUT) Output raw YUV420 image
- * @return    M4NO_ERROR:                        No error
- ******************************************************************************
- */
-static M4OSA_ERR
-M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
-                             M4VIFI_ImagePlane *pPlaneOut )
-{
-    M4OSA_ERR err;
-    M4OSA_Int32 iProgress;
-    M4VSS3GPP_ExternalProgress extProgress;
-    M4VIFI_ImagePlane *pPlane;
-    M4OSA_Int32 i;
-    const M4OSA_Int32 iDur = (M4OSA_Int32)pC->
-        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
-
-    /**
-    * Compute how far from the end cut we are, in clip-base time.
-    * It is done with integers because the offset and begin cut have been rounded already. */
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    iProgress = (M4OSA_Int32)((M4OSA_Double)pC->pC1->iEndTime) - pC->ewc.dInputVidCts +
-        ((M4OSA_Double)pC->pC1->iVoffset);
-    /**
-    * We must remove the duration of one frame, else we would almost never reach the end
-    * (It's kind of a "pile and intervals" issue). */
-    iProgress -= (M4OSA_Int32)pC->dOutputFrameDuration;
-
-    if( iProgress < 0 ) /**< Sanity checks */
-    {
-        iProgress = 0;
-    }
-
-    /**
-    * Compute where we are in the transition, on a base 1000 */
-    iProgress = ( ( iDur - iProgress) * 1000) / iDur;
-
-    /**
-    * Sanity checks */
-    if( iProgress < 0 )
-    {
-        iProgress = 0;
-    }
-    else if( iProgress > 1000 )
-    {
-        iProgress = 1000;
-    }
-
-    switch( pC->pTransitionList[pC->uiCurrentClip].TransitionBehaviour )
-    {
-        case M4VSS3GPP_TransitionBehaviour_SpeedUp:
-            iProgress = ( iProgress * iProgress) / 1000;
-            break;
-
-        case M4VSS3GPP_TransitionBehaviour_Linear:
-            /*do nothing*/
-            break;
-
-        case M4VSS3GPP_TransitionBehaviour_SpeedDown:
-            iProgress = (M4OSA_Int32)(sqrt(iProgress * 1000));
-            break;
-
-        case M4VSS3GPP_TransitionBehaviour_SlowMiddle:
-            if( iProgress < 500 )
-            {
-                iProgress = (M4OSA_Int32)(sqrt(iProgress * 500));
-            }
-            else
-            {
-                iProgress =
-                    (M4OSA_Int32)(( ( ( iProgress - 500) * (iProgress - 500))
-                    / 500) + 500);
-            }
-            break;
-
-        case M4VSS3GPP_TransitionBehaviour_FastMiddle:
-            if( iProgress < 500 )
-            {
-                iProgress = (M4OSA_Int32)(( iProgress * iProgress) / 500);
-            }
-            else
-            {
-                iProgress = (M4OSA_Int32)(sqrt(( iProgress - 500) * 500) + 500);
-            }
-            break;
-
-        default:
-            /*do nothing*/
-            break;
-    }
-
-    switch( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType )
-    {
-        case M4VSS3GPP_kVideoTransitionType_CrossFade:
-            /**
-            * Apply the transition effect */
-            err = M4VIFI_ImageBlendingonYUV420(M4OSA_NULL,
-                (M4ViComImagePlane *)pC->yuv1,
-                (M4ViComImagePlane *)pC->yuv2,
-                (M4ViComImagePlane *)pPlaneOut, iProgress);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intVideoTransition:\
-                     M4VIFI_ImageBlendingonYUV420 returns error 0x%x,\
-                    returning M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR",
-                    err);
-                return M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR;
-            }
-            break;
-
-        case M4VSS3GPP_kVideoTransitionType_None:
-            /**
-            * This is a stupid-non optimized version of the None transition...
-            * We copy the YUV frame */
-            if( iProgress < 500 ) /**< first half of transition */
-            {
-                pPlane = pC->yuv1;
-            }
-            else /**< second half of transition */
-            {
-                pPlane = pC->yuv2;
-            }
-            /**
-            * Copy the input YUV frames */
-            i = 3;
-
-            while( i-- > 0 )
-            {
-                memcpy((void *)pPlaneOut[i].pac_data,
-                 (void *)pPlane[i].pac_data,
-                    pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
-            }
-            break;
-
-        default:
-            if( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType
-                >= M4VSS3GPP_kVideoTransitionType_External )
-            {
-                /**
-                * Set the progress info provided to the external function */
-                extProgress.uiProgress = (M4OSA_UInt32)iProgress;
-                // Decorrelate input and output encoding timestamp to handle encoder prefetch
-                extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
-                extProgress.uiClipTime = extProgress.uiOutputTime - pC->pC1->iVoffset;
-
-                err = pC->pTransitionList[pC->
-                    uiCurrentClip].ExtVideoTransitionFct(
-                    pC->pTransitionList[pC->
-                    uiCurrentClip].pExtVideoTransitionFctCtxt,
-                    pC->yuv1, pC->yuv2, pPlaneOut, &extProgress,
-                    pC->pTransitionList[pC->
-                    uiCurrentClip].VideoTransitionType
-                    - M4VSS3GPP_kVideoTransitionType_External);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intVideoTransition:\
-                        External video transition function returns 0x%x!",
-                        err);
-                    return err;
-                }
-                break;
-            }
-            else
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intVideoTransition: unknown transition type (0x%x),\
-                    returning M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE",
-                    pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType);
-                return M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE;
-            }
-    }
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intVideoTransition: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intUpdateTimeInfo()
- * @brief    Update bit stream time info by Counter Time System to be compliant with
- *          players using bit stream time info
- * @note    H263 uses an absolute time counter unlike MPEG4 which uses Group Of Vops
- *          (GOV, see the standard)
- * @param   pC                    (IN/OUT) returns time updated video AU,
- *                                the offset between system and video time (MPEG4 only)
- *                                and the state of the current clip (MPEG4 only)
- * @return    nothing
- ******************************************************************************
- */
-static M4OSA_Void
-M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
-                            M4SYS_AccessUnit *pAU )
-{
-    M4OSA_UInt8 uiTmp;
-    M4OSA_UInt32 uiCts = 0;
-    M4OSA_MemAddr8 pTmp;
-    M4OSA_UInt32 uiAdd;
-    M4OSA_UInt32 uiCurrGov;
-    M4OSA_Int8 iDiff;
-
-    M4VSS3GPP_ClipContext *pClipCtxt = pC->pC1;
-    M4OSA_Int32 *pOffset = &(pC->ewc.iMpeg4GovOffset);
-
-    /**
-    * Set H263 time counter from system time */
-    if( M4SYS_kH263 == pAU->stream->streamType )
-    {
-        uiTmp = (M4OSA_UInt8)((M4OSA_UInt32)( ( pAU->CTS * 30) / 1001 + 0.5)
-            % M4VSS3GPP_EDIT_H263_MODULO_TIME);
-        M4VSS3GPP_intSetH263TimeCounter((M4OSA_MemAddr8)(pAU->dataAddress),
-            uiTmp);
-    }
-    /*
-    * Set MPEG4 GOV time counter regarding video and system time */
-    else if( M4SYS_kMPEG_4 == pAU->stream->streamType )
-    {
-        /*
-        * If GOV.
-        * beware of little/big endian! */
-        /* correction: read 8 bits block instead of one 32 bits block */
-        M4OSA_UInt8 *temp8 = (M4OSA_UInt8 *)(pAU->dataAddress);
-        M4OSA_UInt32 temp32 = 0;
-
-        temp32 = ( 0x000000ff & (M4OSA_UInt32)(*temp8))
-            + (0x0000ff00 & ((M4OSA_UInt32)(*(temp8 + 1))) << 8)
-            + (0x00ff0000 & ((M4OSA_UInt32)(*(temp8 + 2))) << 16)
-            + (0xff000000 & ((M4OSA_UInt32)(*(temp8 + 3))) << 24);
-
-        M4OSA_TRACE3_2("RC: Temp32: 0x%x, dataAddress: 0x%x\n", temp32,
-            *(pAU->dataAddress));
-
-        if( M4VSS3GPP_EDIT_GOV_HEADER == temp32 )
-        {
-            pTmp =
-                (M4OSA_MemAddr8)(pAU->dataAddress
-                + 1); /**< Jump to the time code (just after the 32 bits header) */
-            uiAdd = (M4OSA_UInt32)(pAU->CTS)+( *pOffset);
-
-            switch( pClipCtxt->bMpeg4GovState )
-            {
-                case M4OSA_FALSE: /*< INIT */
-                    {
-                        /* video time = ceil (system time + offset) */
-                        uiCts = ( uiAdd + 999) / 1000;
-
-                        /* offset update */
-                        ( *pOffset) += (( uiCts * 1000) - uiAdd);
-
-                        /* Save values */
-                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
-
-                        /* State to 'first' */
-                        pClipCtxt->bMpeg4GovState = M4OSA_TRUE;
-                    }
-                    break;
-
-                case M4OSA_TRUE: /*< UPDATE */
-                    {
-                        /* Get current Gov value */
-                        M4VSS3GPP_intGetMPEG4Gov(pTmp, &uiCurrGov);
-
-                        /* video time = floor or ceil (system time + offset) */
-                        uiCts = (uiAdd / 1000);
-                        iDiff = (M4OSA_Int8)(uiCurrGov
-                            - pClipCtxt->uiMpeg4PrevGovValueGet - uiCts
-                            + pClipCtxt->uiMpeg4PrevGovValueSet);
-
-                        /* ceiling */
-                        if( iDiff > 0 )
-                        {
-                            uiCts += (M4OSA_UInt32)(iDiff);
-
-                            /* offset update */
-                            ( *pOffset) += (( uiCts * 1000) - uiAdd);
-                        }
-
-                        /* Save values */
-                        pClipCtxt->uiMpeg4PrevGovValueGet = uiCurrGov;
-                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
-                    }
-                    break;
-            }
-
-            M4VSS3GPP_intSetMPEG4Gov(pTmp, uiCts);
-        }
-    }
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intCheckVideoEffects()
- * @brief    Check which video effect must be applied at the current time
- ******************************************************************************
- */
-static M4OSA_Void
-M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
-                               M4OSA_UInt8 uiClipNumber )
-{
-    M4OSA_UInt8 uiClipIndex;
-    M4OSA_UInt8 uiFxIndex, i;
-    M4VSS3GPP_ClipContext *pClip;
-    M4VSS3GPP_EffectSettings *pFx;
-    M4OSA_Int32 Off, BC, EC;
-    // Decorrelate input and output encoding timestamp to handle encoder prefetch
-    M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts;
-
-    uiClipIndex = pC->uiCurrentClip;
-    if (uiClipNumber == 1) {
-        pClip = pC->pC1;
-        pC->bClip1ActiveFramingEffect = M4OSA_FALSE;
-    } else {
-        pClip = pC->pC2;
-        pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
-    }
-    /**
-    * Shortcuts for code readability */
-    Off = pClip->iVoffset;
-    BC = pClip->iActualVideoBeginCut;
-    EC = pClip->iEndTime;
-
-    i = 0;
-
-    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
-    {
-        /** Shortcut, reverse order because of priority between effects(EndEffect always clean )*/
-        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
-
-        if( M4VSS3GPP_kVideoEffectType_None != pFx->VideoEffectType )
-        {
-            /**
-            * Check if there is actually a video effect */
-
-             if(uiClipNumber ==1)
-             {
-                /**< Are we after the start time of the effect?
-                 * or Are we into the effect duration?
-                 */
-                if ( (t >= (M4OSA_Int32)(pFx->uiStartTime)) &&
-                    (t <= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) {
-                    /**
-                     * Set the active effect(s) */
-                    pC->pActiveEffectsList[i] = pC->nbEffects-1-uiFxIndex;
-
-                    /**
-                     * Update counter of active effects */
-                    i++;
-                    if (pFx->xVSS.pFramingBuffer != M4OSA_NULL) {
-                        pC->bClip1ActiveFramingEffect = M4OSA_TRUE;
-                    }
-
-                    /**
-                     * For all external effects set this flag to true. */
-                    if(pFx->VideoEffectType > M4VSS3GPP_kVideoEffectType_External)
-                    {
-                        pC->m_bClipExternalHasStarted = M4OSA_TRUE;
-                    }
-                }
-
-            }
-            else
-            {
-                /**< Are we into the effect duration? */
-                if ( ((M4OSA_Int32)(t + pC->pTransitionList[uiClipIndex].uiTransitionDuration)
-                    >= (M4OSA_Int32)(pFx->uiStartTime))
-                    && ( (M4OSA_Int32)(t + pC->pTransitionList[uiClipIndex].uiTransitionDuration)
-                    <= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) {
-                    /**
-                     * Set the active effect(s) */
-                    pC->pActiveEffectsList1[i] = pC->nbEffects-1-uiFxIndex;
-
-                    /**
-                     * Update counter of active effects */
-                    i++;
-                    if (pFx->xVSS.pFramingBuffer != M4OSA_NULL) {
-                        pC->bClip2ActiveFramingEffect = M4OSA_TRUE;
-                    }
-                    /**
-                     * For all external effects set this flag to true. */
-                    if(pFx->VideoEffectType > M4VSS3GPP_kVideoEffectType_External)
-                    {
-                        pC->m_bClipExternalHasStarted = M4OSA_TRUE;
-                    }
-
-                    /**
-                     * The third effect has the highest priority, then the second one, then the first one.
-                     * Hence, as soon as we found an active effect, we can get out of this loop */
-                }
-            }
-            if (M4VIDEOEDITING_kH264 !=
-                    pC->pC1->pSettings->ClipProperties.VideoStreamType) {
-
-                    // For Mpeg4 and H263 clips, full decode encode not required
-                    pC->m_bClipExternalHasStarted = M4OSA_FALSE;
-            }
-        }
-    }
-    if(1==uiClipNumber)
-    {
-    /**
-     * Save number of active effects */
-        pC->nbActiveEffects = i;
-    }
-    else
-    {
-        pC->nbActiveEffects1 = i;
-    }
-
-    /**
-    * Change the absolut time to clip related time */
-    t -= Off;
-
-    /**
-    * Check if we are on the begin cut (for clip1 only) */
-    if( ( 0 != BC) && (t == BC) && (1 == uiClipNumber) )
-    {
-        pC->bClip1AtBeginCut = M4OSA_TRUE;
-    }
-    else
-    {
-        pC->bClip1AtBeginCut = M4OSA_FALSE;
-    }
-
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
- * @brief    Creates the video encoder
- * @note
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err;
-    M4ENCODER_AdvancedParams EncParams;
-
-    /**
-    * Simulate a writer interface with our specific function */
-    pC->ewc.OurWriterDataInterface.pProcessAU =
-        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
-                                but it follow the writer interface */
-    pC->ewc.OurWriterDataInterface.pStartAU =
-        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
-                              but it follow the writer interface */
-    pC->ewc.OurWriterDataInterface.pWriterContext =
-        (M4WRITER_Context)
-        pC; /**< We give the internal context as writer context */
-
-    /**
-    * Get the encoder interface, if not already done */
-    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
-    {
-        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
-            pC->ewc.VideoStreamType);
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreateVideoEncoder: setCurrentEncoder returns 0x%x",
-            err);
-        M4ERR_CHECK_RETURN(err);
-    }
-
-    /**
-    * Set encoder shell parameters according to VSS settings */
-
-    /* Common parameters */
-    EncParams.InputFormat = M4ENCODER_kIYUV420;
-    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
-    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
-    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
-
-    if( pC->bIsMMS == M4OSA_FALSE )
-    {
-        /* No strict regulation in video editor */
-        /* Because of the effects and transitions we should allow more flexibility */
-        /* Also it prevents to drop important frames (with a bad result on sheduling and
-        block effetcs) */
-        EncParams.bInternalRegulation = M4OSA_FALSE;
-        // Variable framerate is not supported by StageFright encoders
-        EncParams.FrameRate = M4ENCODER_k30_FPS;
-    }
-    else
-    {
-        /* In case of MMS mode, we need to enable bitrate regulation to be sure */
-        /* to reach the targeted output file size */
-        EncParams.bInternalRegulation = M4OSA_TRUE;
-        EncParams.FrameRate = pC->MMSvideoFramerate;
-    }
-
-    /**
-    * Other encoder settings (defaults) */
-    EncParams.uiHorizontalSearchRange = 0;     /* use default */
-    EncParams.uiVerticalSearchRange = 0;       /* use default */
-    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
-    EncParams.uiIVopPeriod = 0;                /* use default */
-    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
-    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
-    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
-    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
-
-    /**
-    * Set the video profile and level */
-    EncParams.videoProfile = pC->ewc.outputVideoProfile;
-    EncParams.videoLevel= pC->ewc.outputVideoLevel;
-
-    switch ( pC->ewc.VideoStreamType )
-    {
-        case M4SYS_kH263:
-
-            EncParams.Format = M4ENCODER_kH263;
-
-            EncParams.uiStartingQuantizerValue = 10;
-            EncParams.uiRateFactor = 1; /* default */
-
-            EncParams.bErrorResilience = M4OSA_FALSE;
-            EncParams.bDataPartitioning = M4OSA_FALSE;
-            break;
-
-        case M4SYS_kMPEG_4:
-
-            EncParams.Format = M4ENCODER_kMPEG4;
-
-            EncParams.uiStartingQuantizerValue = 8;
-            EncParams.uiRateFactor = (M4OSA_UInt8)(( pC->dOutputFrameDuration
-                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
-
-            if( EncParams.uiRateFactor == 0 )
-                EncParams.uiRateFactor = 1; /* default */
-
-            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
-            {
-                EncParams.bErrorResilience = M4OSA_FALSE;
-                EncParams.bDataPartitioning = M4OSA_FALSE;
-            }
-            else
-            {
-                EncParams.bErrorResilience = M4OSA_TRUE;
-                EncParams.bDataPartitioning = M4OSA_TRUE;
-            }
-            break;
-
-        case M4SYS_kH264:
-            M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: M4SYS_H264");
-
-            EncParams.Format = M4ENCODER_kH264;
-
-            EncParams.uiStartingQuantizerValue = 10;
-            EncParams.uiRateFactor = 1; /* default */
-
-            EncParams.bErrorResilience = M4OSA_FALSE;
-            EncParams.bDataPartitioning = M4OSA_FALSE;
-            //EncParams.FrameRate = M4VIDEOEDITING_k5_FPS;
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreateVideoEncoder: Unknown videoStreamType 0x%x",
-                pC->ewc.VideoStreamType);
-            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
-    }
-
-    if( pC->bIsMMS == M4OSA_FALSE )
-    {
-        EncParams.Bitrate = pC->xVSS.outputVideoBitrate;
-
-    }
-    else
-    {
-        EncParams.Bitrate = pC->uiMMSVideoBitrate; /* RC */
-        EncParams.uiTimeScale = 0; /* We let the encoder choose the timescale */
-    }
-
-    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctInit");
-    /**
-    * Init the video encoder (advanced settings version of the encoder Open function) */
-    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
-        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
-        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
-        pC->ShellAPI.pCurrentVideoEncoderUserData);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
-            err);
-        return err;
-    }
-
-    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
-    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctOpen");
-
-    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
-        &pC->ewc.WriterVideoAU, &EncParams);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1(
-            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
-            err);
-        return err;
-    }
-
-    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
-    M4OSA_TRACE1_0(
-        "M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctStart");
-
-    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
-    {
-        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
-            pC->ewc.pEncContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
-                err);
-            return err;
-        }
-    }
-
-    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
-
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intCreateVideoEncoder: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
- * @brief    Destroy the video encoder
- * @note
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    if( M4OSA_NULL != pC->ewc.pEncContext )
-    {
-        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
-        {
-            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
-            {
-                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
-                    pC->ewc.pEncContext);
-
-                if( M4NO_ERROR != err )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4VSS3GPP_intDestroyVideoEncoder:\
-                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
-                        err);
-                    /* Well... how the heck do you handle a failed cleanup? */
-                }
-            }
-
-            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
-        }
-
-        /* Has the encoder actually been opened? Don't close it if that's not the case. */
-        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
-        {
-            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
-                pC->ewc.pEncContext);
-
-            if( M4NO_ERROR != err )
-            {
-                M4OSA_TRACE1_1(
-                    "M4VSS3GPP_intDestroyVideoEncoder:\
-                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
-                    err);
-                /* Well... how the heck do you handle a failed cleanup? */
-            }
-
-            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
-        }
-
-        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
-            pC->ewc.pEncContext);
-
-        if( M4NO_ERROR != err )
-        {
-            M4OSA_TRACE1_1(
-                "M4VSS3GPP_intDestroyVideoEncoder:\
-                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
-                err);
-            /**< We do not return the error here because we still have stuff to free */
-        }
-
-        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
-        /**
-        * Reset variable */
-        pC->ewc.pEncContext = M4OSA_NULL;
-    }
-
-    M4OSA_TRACE3_1("M4VSS3GPP_intDestroyVideoEncoder: returning 0x%x", err);
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intSetH263TimeCounter()
- * @brief    Modify the time counter of the given H263 video AU
- * @note
- * @param    pAuDataBuffer    (IN/OUT) H263 Video AU to modify
- * @param    uiCts            (IN)     New time counter value
- * @return    nothing
- ******************************************************************************
- */
-static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
-                                                  M4OSA_UInt8 uiCts )
-{
-    /*
-    *  The H263 time counter is 8 bits located on the "x" below:
-    *
-    *   |--------|--------|--------|--------|
-    *    ???????? ???????? ??????xx xxxxxx??
-    */
-
-    /**
-    * Write the 2 bits on the third byte */
-    pAuDataBuffer[2] = ( pAuDataBuffer[2] & 0xFC) | (( uiCts >> 6) & 0x3);
-
-    /**
-    * Write the 6 bits on the fourth byte */
-    pAuDataBuffer[3] = ( ( uiCts << 2) & 0xFC) | (pAuDataBuffer[3] & 0x3);
-
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intSetMPEG4Gov()
- * @brief    Modify the time info from Group Of VOP video AU
- * @note
- * @param    pAuDataBuffer    (IN)    MPEG4 Video AU to modify
- * @param    uiCtsSec            (IN)     New GOV time info in second unit
- * @return    nothing
- ******************************************************************************
- */
-static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
-                                           M4OSA_UInt32 uiCtsSec )
-{
-    /*
-    *  The MPEG-4 time code length is 18 bits:
-    *
-    *     hh     mm    marker    ss
-    *    xxxxx|xxx xxx     1    xxxx xx ??????
-    *   |----- ---|---     -    ----|-- ------|
-    */
-    M4OSA_UInt8 uiHh;
-    M4OSA_UInt8 uiMm;
-    M4OSA_UInt8 uiSs;
-    M4OSA_UInt8 uiTmp;
-
-    /**
-    * Write the 2 last bits ss */
-    uiSs = (M4OSA_UInt8)(uiCtsSec % 60); /**< modulo part */
-    pAuDataBuffer[2] = (( ( uiSs & 0x03) << 6) | (pAuDataBuffer[2] & 0x3F));
-
-    if( uiCtsSec < 60 )
-    {
-        /**
-        * Write the 3 last bits of mm, the marker bit (0x10 */
-        pAuDataBuffer[1] = (( 0x10) | (uiSs >> 2));
-
-        /**
-        * Write the 5 bits of hh and 3 of mm (out of 6) */
-        pAuDataBuffer[0] = 0;
-    }
-    else
-    {
-        /**
-        * Write the 3 last bits of mm, the marker bit (0x10 */
-        uiTmp = (M4OSA_UInt8)(uiCtsSec / 60); /**< integer part */
-        uiMm = (M4OSA_UInt8)(uiTmp % 60);
-        pAuDataBuffer[1] = (( uiMm << 5) | (0x10) | (uiSs >> 2));
-
-        if( uiTmp < 60 )
-        {
-            /**
-            * Write the 5 bits of hh and 3 of mm (out of 6) */
-            pAuDataBuffer[0] = ((uiMm >> 3));
-        }
-        else
-        {
-            /**
-            * Write the 5 bits of hh and 3 of mm (out of 6) */
-            uiHh = (M4OSA_UInt8)(uiTmp / 60);
-            pAuDataBuffer[0] = (( uiHh << 3) | (uiMm >> 3));
-        }
-    }
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_Void M4VSS3GPP_intGetMPEG4Gov()
- * @brief    Get the time info from Group Of VOP video AU
- * @note
- * @param    pAuDataBuffer    (IN)    MPEG4 Video AU to modify
- * @param    pCtsSec            (OUT)    Current GOV time info in second unit
- * @return    nothing
- ******************************************************************************
- */
-static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
-                                           M4OSA_UInt32 *pCtsSec )
-{
-    /*
-    *  The MPEG-4 time code length is 18 bits:
-    *
-    *     hh     mm    marker    ss
-    *    xxxxx|xxx xxx     1    xxxx xx ??????
-    *   |----- ---|---     -    ----|-- ------|
-    */
-    M4OSA_UInt8 uiHh;
-    M4OSA_UInt8 uiMm;
-    M4OSA_UInt8 uiSs;
-    M4OSA_UInt8 uiTmp;
-    M4OSA_UInt32 uiCtsSec;
-
-    /**
-    * Read ss */
-    uiSs = (( pAuDataBuffer[2] & 0xC0) >> 6);
-    uiTmp = (( pAuDataBuffer[1] & 0x0F) << 2);
-    uiCtsSec = uiSs + uiTmp;
-
-    /**
-    * Read mm */
-    uiMm = (( pAuDataBuffer[1] & 0xE0) >> 5);
-    uiTmp = (( pAuDataBuffer[0] & 0x07) << 3);
-    uiMm = uiMm + uiTmp;
-    uiCtsSec = ( uiMm * 60) + uiCtsSec;
-
-    /**
-    * Read hh */
-    uiHh = (( pAuDataBuffer[0] & 0xF8) >> 3);
-
-    if( uiHh )
-    {
-        uiCtsSec = ( uiHh * 3600) + uiCtsSec;
-    }
-
-    /*
-    * in sec */
-    *pCtsSec = uiCtsSec;
-
-    return;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_intAllocateYUV420()
- * @brief    Allocate the three YUV 4:2:0 planes
- * @note
- * @param    pPlanes    (IN/OUT) valid pointer to 3 M4VIFI_ImagePlane structures
- * @param    uiWidth    (IN)     Image width
- * @param    uiHeight(IN)     Image height
- ******************************************************************************
- */
-static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
-                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight )
-{
-    if (pPlanes == M4OSA_NULL) {
-        M4OSA_TRACE1_0("M4VSS3GPP_intAllocateYUV420: Invalid pPlanes pointer");
-        return M4ERR_PARAMETER;
-    }
-    /* if the buffer is not NULL and same size with target size,
-     * do not malloc again*/
-    if (pPlanes[0].pac_data != M4OSA_NULL &&
-        pPlanes[0].u_width == uiWidth &&
-        pPlanes[0].u_height == uiHeight) {
-        return M4NO_ERROR;
-    }
-
-    pPlanes[0].u_width = uiWidth;
-    pPlanes[0].u_height = uiHeight;
-    pPlanes[0].u_stride = uiWidth;
-    pPlanes[0].u_topleft = 0;
-
-    if (pPlanes[0].pac_data != M4OSA_NULL) {
-        free(pPlanes[0].pac_data);
-        pPlanes[0].pac_data = M4OSA_NULL;
-    }
-    pPlanes[0].pac_data = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pPlanes[0].u_stride
-        * pPlanes[0].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[0].pac_data");
-
-    if( M4OSA_NULL == pPlanes[0].pac_data )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[0].pac_data,\
-            returning M4ERR_ALLOC");
-        return M4ERR_ALLOC;
-    }
-
-    pPlanes[1].u_width = pPlanes[0].u_width >> 1;
-    pPlanes[1].u_height = pPlanes[0].u_height >> 1;
-    pPlanes[1].u_stride = pPlanes[1].u_width;
-    pPlanes[1].u_topleft = 0;
-    if (pPlanes[1].pac_data != M4OSA_NULL) {
-        free(pPlanes[1].pac_data);
-        pPlanes[1].pac_data = M4OSA_NULL;
-    }
-    pPlanes[1].pac_data = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pPlanes[1].u_stride
-        * pPlanes[1].u_height, M4VSS3GPP,(M4OSA_Char *) "pPlanes[1].pac_data");
-
-    if( M4OSA_NULL == pPlanes[1].pac_data )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[1].pac_data,\
-            returning M4ERR_ALLOC");
-        free((void *)pPlanes[0].pac_data);
-        pPlanes[0].pac_data = M4OSA_NULL;
-        return M4ERR_ALLOC;
-    }
-
-    pPlanes[2].u_width = pPlanes[1].u_width;
-    pPlanes[2].u_height = pPlanes[1].u_height;
-    pPlanes[2].u_stride = pPlanes[2].u_width;
-    pPlanes[2].u_topleft = 0;
-    if (pPlanes[2].pac_data != M4OSA_NULL) {
-        free(pPlanes[2].pac_data);
-        pPlanes[2].pac_data = M4OSA_NULL;
-    }
-    pPlanes[2].pac_data = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pPlanes[2].u_stride
-        * pPlanes[2].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[2].pac_data");
-
-    if( M4OSA_NULL == pPlanes[2].pac_data )
-    {
-        M4OSA_TRACE1_0(
-            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[2].pac_data,\
-            returning M4ERR_ALLOC");
-        free((void *)pPlanes[0].pac_data);
-        free((void *)pPlanes[1].pac_data);
-        pPlanes[0].pac_data = M4OSA_NULL;
-        pPlanes[1].pac_data = M4OSA_NULL;
-        return M4ERR_ALLOC;
-    }
-
-    memset((void *)pPlanes[0].pac_data, 0, pPlanes[0].u_stride*pPlanes[0].u_height);
-    memset((void *)pPlanes[1].pac_data, 0, pPlanes[1].u_stride*pPlanes[1].u_height);
-    memset((void *)pPlanes[2].pac_data, 0, pPlanes[2].u_stride*pPlanes[2].u_height);
-    /**
-    *    Return */
-    M4OSA_TRACE3_0("M4VSS3GPP_intAllocateYUV420: returning M4NO_ERROR");
-    return M4NO_ERROR;
-}
-
-/**
-******************************************************************************
-* M4OSA_ERR M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
-*                                            M4OSA_FileReadPointer* pFileReadPtr,
-*                                               M4VIFI_ImagePlane* pImagePlanes,
-*                                               M4OSA_UInt32 width,
-*                                               M4OSA_UInt32 height);
-* @brief    It Coverts and resizes a ARGB8888 image to YUV420
-* @note
-* @param    pFileIn         (IN) The ARGB888 input file
-* @param    pFileReadPtr    (IN) Pointer on filesystem functions
-* @param    pImagePlanes    (IN/OUT) Pointer on YUV420 output planes allocated by the user.
-*                           ARGB8888 image  will be converted and resized to output
-*                           YUV420 plane size
-* @param width       (IN) width of the ARGB8888
-* @param height      (IN) height of the ARGB8888
-* @return   M4NO_ERROR: No error
-* @return   M4ERR_ALLOC: memory error
-* @return   M4ERR_PARAMETER: At least one of the function parameters is null
-******************************************************************************
-*/
-
-M4OSA_ERR M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
-                           M4OSA_FileReadPointer* pFileReadPtr,
-                           M4VIFI_ImagePlane* pImagePlanes,
-                           M4OSA_UInt32 width,M4OSA_UInt32 height) {
-    M4OSA_Context pARGBIn;
-    M4VIFI_ImagePlane rgbPlane1 ,rgbPlane2;
-    M4OSA_UInt32 frameSize_argb = width * height * 4;
-    M4OSA_UInt32 frameSize_rgb888 = width * height * 3;
-    M4OSA_UInt32 i = 0,j= 0;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4OSA_UInt8 *pArgbPlane =
-        (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb,
-                                                M4VS, (M4OSA_Char*)"argb data");
-    if (pArgbPlane == M4OSA_NULL) {
-        M4OSA_TRACE1_0("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420: \
-            Failed to allocate memory for ARGB plane");
-        return M4ERR_ALLOC;
-    }
-
-    /* Get file size */
-    err = pFileReadPtr->openRead(&pARGBIn, pFileIn, M4OSA_kFileRead);
-    if (err != M4NO_ERROR) {
-        M4OSA_TRACE1_2("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 : \
-            Can not open input ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
-        free(pArgbPlane);
-        pArgbPlane = M4OSA_NULL;
-        goto cleanup;
-    }
-
-    err = pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pArgbPlane,
-                                 &frameSize_argb);
-    if (err != M4NO_ERROR) {
-        M4OSA_TRACE1_2("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 \
-            Can not read ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
-        pFileReadPtr->closeRead(pARGBIn);
-        free(pArgbPlane);
-        pArgbPlane = M4OSA_NULL;
-        goto cleanup;
-    }
-
-    err = pFileReadPtr->closeRead(pARGBIn);
-    if(err != M4NO_ERROR) {
-        M4OSA_TRACE1_2("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 \
-            Can not close ARGB8888  file %s, error: 0x%x\n",pFileIn, err);
-        free(pArgbPlane);
-        pArgbPlane = M4OSA_NULL;
-        goto cleanup;
-    }
-
-    rgbPlane1.pac_data =
-        (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize_rgb888,
-                                            M4VS, (M4OSA_Char*)"RGB888 plane1");
-    if(rgbPlane1.pac_data == M4OSA_NULL) {
-        M4OSA_TRACE1_0("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 \
-            Failed to allocate memory for rgb plane1");
-        free(pArgbPlane);
-        return M4ERR_ALLOC;
-    }
-
-    rgbPlane1.u_height = height;
-    rgbPlane1.u_width = width;
-    rgbPlane1.u_stride = width*3;
-    rgbPlane1.u_topleft = 0;
-
-
-    /** Remove the alpha channel */
-    for (i=0, j = 0; i < frameSize_argb; i++) {
-        if ((i % 4) == 0) continue;
-        rgbPlane1.pac_data[j] = pArgbPlane[i];
-        j++;
-    }
-    free(pArgbPlane);
-
-    /**
-     * Check if resizing is required with color conversion */
-    if(width != pImagePlanes->u_width || height != pImagePlanes->u_height) {
-
-        frameSize_rgb888 = pImagePlanes->u_width * pImagePlanes->u_height * 3;
-        rgbPlane2.pac_data =
-            (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize_rgb888, M4VS,
-                                                   (M4OSA_Char*)"rgb Plane2");
-        if(rgbPlane2.pac_data == M4OSA_NULL) {
-            M4OSA_TRACE1_0("Failed to allocate memory for rgb plane2");
-            free(rgbPlane1.pac_data);
-            return M4ERR_ALLOC;
-        }
-        rgbPlane2.u_height =  pImagePlanes->u_height;
-        rgbPlane2.u_width = pImagePlanes->u_width;
-        rgbPlane2.u_stride = pImagePlanes->u_width*3;
-        rgbPlane2.u_topleft = 0;
-
-        /* Resizing */
-        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL,
-                                                  &rgbPlane1, &rgbPlane2);
-        free(rgbPlane1.pac_data);
-        if(err != M4NO_ERROR) {
-            M4OSA_TRACE1_1("error resizing RGB888 to RGB888: 0x%x\n", err);
-            free(rgbPlane2.pac_data);
-            return err;
-        }
-
-        /*Converting Resized RGB888 to YUV420 */
-        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane2, pImagePlanes);
-        free(rgbPlane2.pac_data);
-        if(err != M4NO_ERROR) {
-            M4OSA_TRACE1_1("error converting from RGB888 to YUV: 0x%x\n", err);
-            return err;
-        }
-    } else {
-        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane1, pImagePlanes);
-        if(err != M4NO_ERROR) {
-            M4OSA_TRACE1_1("error when converting from RGB to YUV: 0x%x\n", err);
-        }
-        free(rgbPlane1.pac_data);
-    }
-cleanup:
-    M4OSA_TRACE3_0("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 exit");
-    return err;
-}
-
-M4OSA_ERR M4VSS3GPP_intApplyRenderingMode(M4VSS3GPP_InternalEditContext *pC,
-                                          M4xVSS_MediaRendering renderingMode,
-                                          M4VIFI_ImagePlane* pInplane,
-                                          M4VIFI_ImagePlane* pOutplane) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4AIR_Params airParams;
-    M4VIFI_ImagePlane pImagePlanesTemp[3];
-    M4OSA_UInt32 i = 0;
-
-    if (renderingMode == M4xVSS_kBlackBorders) {
-        memset((void *)pOutplane[0].pac_data, Y_PLANE_BORDER_VALUE,
-               (pOutplane[0].u_height*pOutplane[0].u_stride));
-        memset((void *)pOutplane[1].pac_data, U_PLANE_BORDER_VALUE,
-               (pOutplane[1].u_height*pOutplane[1].u_stride));
-        memset((void *)pOutplane[2].pac_data, V_PLANE_BORDER_VALUE,
-               (pOutplane[2].u_height*pOutplane[2].u_stride));
-    }
-
-    if (renderingMode == M4xVSS_kResizing) {
-        /**
-        * Call the resize filter.
-        * From the intermediate frame to the encoder image plane */
-        err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL,
-                                                  pInplane, pOutplane);
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
-                M4ViFilResizeBilinearYUV420toYUV420 returns 0x%x!", err);
-            return err;
-        }
-    } else {
-        M4VIFI_ImagePlane* pPlaneTemp = M4OSA_NULL;
-        M4OSA_UInt8* pOutPlaneY =
-            pOutplane[0].pac_data + pOutplane[0].u_topleft;
-        M4OSA_UInt8* pOutPlaneU =
-            pOutplane[1].pac_data + pOutplane[1].u_topleft;
-        M4OSA_UInt8* pOutPlaneV =
-            pOutplane[2].pac_data + pOutplane[2].u_topleft;
-        M4OSA_UInt8* pInPlaneY = M4OSA_NULL;
-        M4OSA_UInt8* pInPlaneU = M4OSA_NULL;
-        M4OSA_UInt8* pInPlaneV = M4OSA_NULL;
-
-        /* To keep media aspect ratio*/
-        /* Initialize AIR Params*/
-        airParams.m_inputCoord.m_x = 0;
-        airParams.m_inputCoord.m_y = 0;
-        airParams.m_inputSize.m_height = pInplane->u_height;
-        airParams.m_inputSize.m_width = pInplane->u_width;
-        airParams.m_outputSize.m_width = pOutplane->u_width;
-        airParams.m_outputSize.m_height = pOutplane->u_height;
-        airParams.m_bOutputStripe = M4OSA_FALSE;
-        airParams.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-
-        /**
-        Media rendering: Black borders*/
-        if (renderingMode == M4xVSS_kBlackBorders) {
-            pImagePlanesTemp[0].u_width = pOutplane[0].u_width;
-            pImagePlanesTemp[0].u_height = pOutplane[0].u_height;
-            pImagePlanesTemp[0].u_stride = pOutplane[0].u_width;
-            pImagePlanesTemp[0].u_topleft = 0;
-
-            pImagePlanesTemp[1].u_width = pOutplane[1].u_width;
-            pImagePlanesTemp[1].u_height = pOutplane[1].u_height;
-            pImagePlanesTemp[1].u_stride = pOutplane[1].u_width;
-            pImagePlanesTemp[1].u_topleft = 0;
-
-            pImagePlanesTemp[2].u_width = pOutplane[2].u_width;
-            pImagePlanesTemp[2].u_height = pOutplane[2].u_height;
-            pImagePlanesTemp[2].u_stride = pOutplane[2].u_width;
-            pImagePlanesTemp[2].u_topleft = 0;
-
-            /**
-             * Allocates plan in local image plane structure */
-            pImagePlanesTemp[0].pac_data =
-                (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                    pImagePlanesTemp[0].u_width * pImagePlanesTemp[0].u_height,
-                    M4VS, (M4OSA_Char *)"pImagePlaneTemp Y") ;
-            if (pImagePlanesTemp[0].pac_data == M4OSA_NULL) {
-                M4OSA_TRACE1_0("M4VSS3GPP_intApplyRenderingMode: Alloc Error");
-                return M4ERR_ALLOC;
-            }
-            pImagePlanesTemp[1].pac_data =
-                (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                    pImagePlanesTemp[1].u_width * pImagePlanesTemp[1].u_height,
-                    M4VS, (M4OSA_Char *)"pImagePlaneTemp U") ;
-            if (pImagePlanesTemp[1].pac_data == M4OSA_NULL) {
-                M4OSA_TRACE1_0("M4VSS3GPP_intApplyRenderingMode: Alloc Error");
-                free(pImagePlanesTemp[0].pac_data);
-                return M4ERR_ALLOC;
-            }
-            pImagePlanesTemp[2].pac_data =
-                (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                    pImagePlanesTemp[2].u_width * pImagePlanesTemp[2].u_height,
-                    M4VS, (M4OSA_Char *)"pImagePlaneTemp V") ;
-            if (pImagePlanesTemp[2].pac_data == M4OSA_NULL) {
-                M4OSA_TRACE1_0("M4VSS3GPP_intApplyRenderingMode: Alloc Error");
-                free(pImagePlanesTemp[0].pac_data);
-                free(pImagePlanesTemp[1].pac_data);
-                return M4ERR_ALLOC;
-            }
-
-            pInPlaneY = pImagePlanesTemp[0].pac_data ;
-            pInPlaneU = pImagePlanesTemp[1].pac_data ;
-            pInPlaneV = pImagePlanesTemp[2].pac_data ;
-
-            memset((void *)pImagePlanesTemp[0].pac_data, Y_PLANE_BORDER_VALUE,
-                (pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride));
-            memset((void *)pImagePlanesTemp[1].pac_data, U_PLANE_BORDER_VALUE,
-                (pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride));
-            memset((void *)pImagePlanesTemp[2].pac_data, V_PLANE_BORDER_VALUE,
-                (pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride));
-
-            M4OSA_UInt32 height =
-                (pInplane->u_height * pOutplane->u_width) /pInplane->u_width;
-
-            if (height <= pOutplane->u_height) {
-                /**
-                 * Black borders will be on the top and the bottom side */
-                airParams.m_outputSize.m_width = pOutplane->u_width;
-                airParams.m_outputSize.m_height = height;
-                /**
-                 * Number of lines at the top */
-                pImagePlanesTemp[0].u_topleft =
-                    (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_height -
-                      airParams.m_outputSize.m_height)>>1)) *
-                      pImagePlanesTemp[0].u_stride;
-                pImagePlanesTemp[0].u_height = airParams.m_outputSize.m_height;
-                pImagePlanesTemp[1].u_topleft =
-                    (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height -
-                     (airParams.m_outputSize.m_height>>1)))>>1) *
-                     pImagePlanesTemp[1].u_stride;
-                pImagePlanesTemp[1].u_height =
-                    airParams.m_outputSize.m_height>>1;
-                pImagePlanesTemp[2].u_topleft =
-                    (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height -
-                     (airParams.m_outputSize.m_height>>1)))>>1) *
-                     pImagePlanesTemp[2].u_stride;
-                pImagePlanesTemp[2].u_height =
-                    airParams.m_outputSize.m_height>>1;
-            } else {
-                /**
-                 * Black borders will be on the left and right side */
-                airParams.m_outputSize.m_height = pOutplane->u_height;
-                airParams.m_outputSize.m_width =
-                    (M4OSA_UInt32)((pInplane->u_width * pOutplane->u_height)/pInplane->u_height);
-
-                pImagePlanesTemp[0].u_topleft =
-                    (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width -
-                     airParams.m_outputSize.m_width)>>1));
-                pImagePlanesTemp[0].u_width = airParams.m_outputSize.m_width;
-                pImagePlanesTemp[1].u_topleft =
-                    (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width -
-                     (airParams.m_outputSize.m_width>>1)))>>1);
-                pImagePlanesTemp[1].u_width = airParams.m_outputSize.m_width>>1;
-                pImagePlanesTemp[2].u_topleft =
-                    (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width -
-                     (airParams.m_outputSize.m_width>>1)))>>1);
-                pImagePlanesTemp[2].u_width = airParams.m_outputSize.m_width>>1;
-            }
-
-            /**
-             * Width and height have to be even */
-            airParams.m_outputSize.m_width =
-                (airParams.m_outputSize.m_width>>1)<<1;
-            airParams.m_outputSize.m_height =
-                (airParams.m_outputSize.m_height>>1)<<1;
-            airParams.m_inputSize.m_width =
-                (airParams.m_inputSize.m_width>>1)<<1;
-            airParams.m_inputSize.m_height =
-                (airParams.m_inputSize.m_height>>1)<<1;
-            pImagePlanesTemp[0].u_width =
-                (pImagePlanesTemp[0].u_width>>1)<<1;
-            pImagePlanesTemp[1].u_width =
-                (pImagePlanesTemp[1].u_width>>1)<<1;
-            pImagePlanesTemp[2].u_width =
-                (pImagePlanesTemp[2].u_width>>1)<<1;
-            pImagePlanesTemp[0].u_height =
-                (pImagePlanesTemp[0].u_height>>1)<<1;
-            pImagePlanesTemp[1].u_height =
-                (pImagePlanesTemp[1].u_height>>1)<<1;
-            pImagePlanesTemp[2].u_height =
-                (pImagePlanesTemp[2].u_height>>1)<<1;
-
-            /**
-             * Check that values are coherent */
-            if (airParams.m_inputSize.m_height ==
-                   airParams.m_outputSize.m_height) {
-                airParams.m_inputSize.m_width =
-                    airParams.m_outputSize.m_width;
-            } else if (airParams.m_inputSize.m_width ==
-                          airParams.m_outputSize.m_width) {
-                airParams.m_inputSize.m_height =
-                    airParams.m_outputSize.m_height;
-            }
-            pPlaneTemp = pImagePlanesTemp;
-        }
-
-        /**
-         * Media rendering: Cropping*/
-        if (renderingMode == M4xVSS_kCropping) {
-            airParams.m_outputSize.m_height = pOutplane->u_height;
-            airParams.m_outputSize.m_width = pOutplane->u_width;
-            if ((airParams.m_outputSize.m_height *
-                 airParams.m_inputSize.m_width)/airParams.m_outputSize.m_width <
-                  airParams.m_inputSize.m_height) {
-                /* Height will be cropped */
-                airParams.m_inputSize.m_height =
-                    (M4OSA_UInt32)((airParams.m_outputSize.m_height *
-                     airParams.m_inputSize.m_width)/airParams.m_outputSize.m_width);
-                airParams.m_inputSize.m_height =
-                    (airParams.m_inputSize.m_height>>1)<<1;
-                airParams.m_inputCoord.m_y =
-                    (M4OSA_Int32)((M4OSA_Int32)((pInplane->u_height -
-                     airParams.m_inputSize.m_height))>>1);
-            } else {
-                /* Width will be cropped */
-                airParams.m_inputSize.m_width =
-                    (M4OSA_UInt32)((airParams.m_outputSize.m_width *
-                     airParams.m_inputSize.m_height)/airParams.m_outputSize.m_height);
-                airParams.m_inputSize.m_width =
-                    (airParams.m_inputSize.m_width>>1)<<1;
-                airParams.m_inputCoord.m_x =
-                    (M4OSA_Int32)((M4OSA_Int32)((pInplane->u_width -
-                     airParams.m_inputSize.m_width))>>1);
-            }
-            pPlaneTemp = pOutplane;
-        }
-        /**
-        * Call AIR functions */
-        if (M4OSA_NULL == pC->m_air_context) {
-            err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
-            if(err != M4NO_ERROR) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
-                    M4AIR_create returned error 0x%x", err);
-                goto cleanUp;
-            }
-        }
-
-        err = M4AIR_configure(pC->m_air_context, &airParams);
-        if (err != M4NO_ERROR) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
-                Error when configuring AIR: 0x%x", err);
-            M4AIR_cleanUp(pC->m_air_context);
-            goto cleanUp;
-        }
-
-        err = M4AIR_get(pC->m_air_context, pInplane, pPlaneTemp);
-        if (err != M4NO_ERROR) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
-                Error when getting AIR plane: 0x%x", err);
-            M4AIR_cleanUp(pC->m_air_context);
-            goto cleanUp;
-        }
-
-        if (renderingMode == M4xVSS_kBlackBorders) {
-            for (i=0; i<pOutplane[0].u_height; i++) {
-                memcpy((void *)pOutPlaneY, (void *)pInPlaneY,
-                        pOutplane[0].u_width);
-                pInPlaneY += pOutplane[0].u_width;
-                pOutPlaneY += pOutplane[0].u_stride;
-            }
-            for (i=0; i<pOutplane[1].u_height; i++) {
-                memcpy((void *)pOutPlaneU, (void *)pInPlaneU,
-                        pOutplane[1].u_width);
-                pInPlaneU += pOutplane[1].u_width;
-                pOutPlaneU += pOutplane[1].u_stride;
-            }
-            for (i=0; i<pOutplane[2].u_height; i++) {
-                memcpy((void *)pOutPlaneV, (void *)pInPlaneV,
-                        pOutplane[2].u_width);
-                pInPlaneV += pOutplane[2].u_width;
-                pOutPlaneV += pOutplane[2].u_stride;
-            }
-        }
-    }
-cleanUp:
-    if (renderingMode == M4xVSS_kBlackBorders) {
-        for (i=0; i<3; i++) {
-            if (pImagePlanesTemp[i].pac_data != M4OSA_NULL) {
-                free(pImagePlanesTemp[i].pac_data);
-                pImagePlanesTemp[i].pac_data = M4OSA_NULL;
-            }
-        }
-    }
-    return err;
-}
-
-M4OSA_ERR M4VSS3GPP_intSetYuv420PlaneFromARGB888 (
-                                        M4VSS3GPP_InternalEditContext *pC,
-                                        M4VSS3GPP_ClipContext* pClipCtxt) {
-
-    M4OSA_ERR err= M4NO_ERROR;
-
-    // Allocate memory for YUV plane
-    pClipCtxt->pPlaneYuv =
-     (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
-        3*sizeof(M4VIFI_ImagePlane), M4VS,
-        (M4OSA_Char*)"pPlaneYuv");
-
-    if (pClipCtxt->pPlaneYuv == M4OSA_NULL) {
-        return M4ERR_ALLOC;
-    }
-
-    pClipCtxt->pPlaneYuv[0].u_height =
-        pClipCtxt->pSettings->ClipProperties.uiStillPicHeight;
-    pClipCtxt->pPlaneYuv[0].u_width =
-        pClipCtxt->pSettings->ClipProperties.uiStillPicWidth;
-    pClipCtxt->pPlaneYuv[0].u_stride = pClipCtxt->pPlaneYuv[0].u_width;
-    pClipCtxt->pPlaneYuv[0].u_topleft = 0;
-
-    pClipCtxt->pPlaneYuv[0].pac_data =
-     (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
-         pClipCtxt->pPlaneYuv[0].u_height * pClipCtxt->pPlaneYuv[0].u_width * 1.5,
-         M4VS, (M4OSA_Char*)"imageClip YUV data");
-    if (pClipCtxt->pPlaneYuv[0].pac_data == M4OSA_NULL) {
-        free(pClipCtxt->pPlaneYuv);
-        return M4ERR_ALLOC;
-    }
-
-    pClipCtxt->pPlaneYuv[1].u_height = pClipCtxt->pPlaneYuv[0].u_height >>1;
-    pClipCtxt->pPlaneYuv[1].u_width = pClipCtxt->pPlaneYuv[0].u_width >> 1;
-    pClipCtxt->pPlaneYuv[1].u_stride = pClipCtxt->pPlaneYuv[1].u_width;
-    pClipCtxt->pPlaneYuv[1].u_topleft = 0;
-    pClipCtxt->pPlaneYuv[1].pac_data = (M4VIFI_UInt8*)(
-     pClipCtxt->pPlaneYuv[0].pac_data +
-      pClipCtxt->pPlaneYuv[0].u_height * pClipCtxt->pPlaneYuv[0].u_width);
-
-    pClipCtxt->pPlaneYuv[2].u_height = pClipCtxt->pPlaneYuv[0].u_height >>1;
-    pClipCtxt->pPlaneYuv[2].u_width = pClipCtxt->pPlaneYuv[0].u_width >> 1;
-    pClipCtxt->pPlaneYuv[2].u_stride = pClipCtxt->pPlaneYuv[2].u_width;
-    pClipCtxt->pPlaneYuv[2].u_topleft = 0;
-    pClipCtxt->pPlaneYuv[2].pac_data = (M4VIFI_UInt8*)(
-     pClipCtxt->pPlaneYuv[1].pac_data +
-      pClipCtxt->pPlaneYuv[1].u_height * pClipCtxt->pPlaneYuv[1].u_width);
-
-    err = M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 (
-        pClipCtxt->pSettings->pFile,
-        pC->pOsaFileReadPtr,
-        pClipCtxt->pPlaneYuv,
-        pClipCtxt->pSettings->ClipProperties.uiStillPicWidth,
-        pClipCtxt->pSettings->ClipProperties.uiStillPicHeight);
-    if (M4NO_ERROR != err) {
-        free(pClipCtxt->pPlaneYuv[0].pac_data);
-        free(pClipCtxt->pPlaneYuv);
-        return err;
-    }
-
-    // Set the YUV data to the decoder using setoption
-    err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption (
-        pClipCtxt->pViDecCtxt,
-        M4DECODER_kOptionID_DecYuvData,
-        (M4OSA_DataOption)pClipCtxt->pPlaneYuv);
-    if (M4NO_ERROR != err) {
-        free(pClipCtxt->pPlaneYuv[0].pac_data);
-        free(pClipCtxt->pPlaneYuv);
-        return err;
-    }
-
-    pClipCtxt->pSettings->ClipProperties.bSetImageData = M4OSA_TRUE;
-
-    // Allocate Yuv plane with effect
-    pClipCtxt->pPlaneYuvWithEffect =
-     (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
-         3*sizeof(M4VIFI_ImagePlane), M4VS,
-         (M4OSA_Char*)"pPlaneYuvWithEffect");
-    if (pClipCtxt->pPlaneYuvWithEffect == M4OSA_NULL) {
-        free(pClipCtxt->pPlaneYuv[0].pac_data);
-        free(pClipCtxt->pPlaneYuv);
-        return M4ERR_ALLOC;
-    }
-
-    pClipCtxt->pPlaneYuvWithEffect[0].u_height = pC->ewc.uiVideoHeight;
-    pClipCtxt->pPlaneYuvWithEffect[0].u_width = pC->ewc.uiVideoWidth;
-    pClipCtxt->pPlaneYuvWithEffect[0].u_stride = pC->ewc.uiVideoWidth;
-    pClipCtxt->pPlaneYuvWithEffect[0].u_topleft = 0;
-
-    pClipCtxt->pPlaneYuvWithEffect[0].pac_data =
-     (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
-         pC->ewc.uiVideoHeight * pC->ewc.uiVideoWidth * 1.5,
-         M4VS, (M4OSA_Char*)"imageClip YUV data");
-    if (pClipCtxt->pPlaneYuvWithEffect[0].pac_data == M4OSA_NULL) {
-        free(pClipCtxt->pPlaneYuv[0].pac_data);
-        free(pClipCtxt->pPlaneYuv);
-        free(pClipCtxt->pPlaneYuvWithEffect);
-        return M4ERR_ALLOC;
-    }
-
-    pClipCtxt->pPlaneYuvWithEffect[1].u_height =
-        pClipCtxt->pPlaneYuvWithEffect[0].u_height >>1;
-    pClipCtxt->pPlaneYuvWithEffect[1].u_width =
-        pClipCtxt->pPlaneYuvWithEffect[0].u_width >> 1;
-    pClipCtxt->pPlaneYuvWithEffect[1].u_stride =
-        pClipCtxt->pPlaneYuvWithEffect[1].u_width;
-    pClipCtxt->pPlaneYuvWithEffect[1].u_topleft = 0;
-    pClipCtxt->pPlaneYuvWithEffect[1].pac_data = (M4VIFI_UInt8*)(
-        pClipCtxt->pPlaneYuvWithEffect[0].pac_data +
-         pClipCtxt->pPlaneYuvWithEffect[0].u_height * pClipCtxt->pPlaneYuvWithEffect[0].u_width);
-
-    pClipCtxt->pPlaneYuvWithEffect[2].u_height =
-        pClipCtxt->pPlaneYuvWithEffect[0].u_height >>1;
-    pClipCtxt->pPlaneYuvWithEffect[2].u_width =
-        pClipCtxt->pPlaneYuvWithEffect[0].u_width >> 1;
-    pClipCtxt->pPlaneYuvWithEffect[2].u_stride =
-        pClipCtxt->pPlaneYuvWithEffect[2].u_width;
-    pClipCtxt->pPlaneYuvWithEffect[2].u_topleft = 0;
-    pClipCtxt->pPlaneYuvWithEffect[2].pac_data = (M4VIFI_UInt8*)(
-        pClipCtxt->pPlaneYuvWithEffect[1].pac_data +
-         pClipCtxt->pPlaneYuvWithEffect[1].u_height * pClipCtxt->pPlaneYuvWithEffect[1].u_width);
-
-    err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-        pClipCtxt->pViDecCtxt, M4DECODER_kOptionID_YuvWithEffectContiguous,
-        (M4OSA_DataOption)pClipCtxt->pPlaneYuvWithEffect);
-    if (M4NO_ERROR != err) {
-        free(pClipCtxt->pPlaneYuv[0].pac_data);
-        free(pClipCtxt->pPlaneYuv);
-        free(pClipCtxt->pPlaneYuvWithEffect);
-        return err;
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR M4VSS3GPP_intRenderFrameWithEffect(M4VSS3GPP_InternalEditContext *pC,
-                                             M4VSS3GPP_ClipContext* pClipCtxt,
-                                             M4_MediaTime ts,
-                                             M4OSA_Bool bIsClip1,
-                                             M4VIFI_ImagePlane *pResizePlane,
-                                             M4VIFI_ImagePlane *pPlaneNoResize,
-                                             M4VIFI_ImagePlane *pPlaneOut) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt8 numEffects = 0;
-    M4VIFI_ImagePlane *pDecoderRenderFrame = M4OSA_NULL;
-    M4OSA_UInt32 yuvFrameWidth = 0, yuvFrameHeight = 0;
-    M4VIFI_ImagePlane* pTmp = M4OSA_NULL;
-    M4VIFI_ImagePlane pTemp[3];
-    M4OSA_UInt8 i = 0;
-    M4OSA_Bool bSkipFramingEffect = M4OSA_FALSE;
-
-    memset((void *)pTemp, 0, 3*sizeof(M4VIFI_ImagePlane));
-    /* Resize or rotate case */
-    if (M4OSA_NULL != pClipCtxt->m_pPreResizeFrame) {
-        /**
-        * If we do modify the image, we need an intermediate image plane */
-        err = M4VSS3GPP_intAllocateYUV420(pResizePlane,
-            pClipCtxt->m_pPreResizeFrame[0].u_width,
-            pClipCtxt->m_pPreResizeFrame[0].u_height);
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-             M4VSS3GPP_intAllocateYUV420 returns 0x%x", err);
-            return err;
-        }
-
-        if ((pClipCtxt->pSettings->FileType ==
-              M4VIDEOEDITING_kFileType_ARGB8888) &&
-            (pC->nbActiveEffects == 0) &&
-            (pClipCtxt->bGetYuvDataFromDecoder == M4OSA_FALSE)) {
-
-            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                      pClipCtxt->pViDecCtxt,
-                      M4DECODER_kOptionID_EnableYuvWithEffect,
-                      (M4OSA_DataOption)M4OSA_TRUE);
-            if (M4NO_ERROR == err) {
-                pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                    pClipCtxt->pViDecCtxt, &ts,
-                    pClipCtxt->pPlaneYuvWithEffect, M4OSA_TRUE);
-            }
-
-        } else {
-            if (pClipCtxt->pSettings->FileType ==
-              M4VIDEOEDITING_kFileType_ARGB8888) {
-                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                          pClipCtxt->pViDecCtxt,
-                          M4DECODER_kOptionID_EnableYuvWithEffect,
-                          (M4OSA_DataOption)M4OSA_FALSE);
-            }
-            if (M4NO_ERROR == err) {
-                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                    pClipCtxt->pViDecCtxt, &ts,
-                    pClipCtxt->m_pPreResizeFrame, M4OSA_TRUE);
-            }
-
-        }
-        if (M4NO_ERROR != err) {
-            M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                returns error 0x%x", err);
-            return err;
-        }
-
-        if (pClipCtxt->pSettings->FileType !=
-                M4VIDEOEDITING_kFileType_ARGB8888) {
-            if (0 != pClipCtxt->pSettings->ClipProperties.videoRotationDegrees) {
-                // Save width and height of un-rotated frame
-                yuvFrameWidth = pClipCtxt->m_pPreResizeFrame[0].u_width;
-                yuvFrameHeight = pClipCtxt->m_pPreResizeFrame[0].u_height;
-                err = M4VSS3GPP_intRotateVideo(pClipCtxt->m_pPreResizeFrame,
-                    pClipCtxt->pSettings->ClipProperties.videoRotationDegrees);
-                if (M4NO_ERROR != err) {
-                    M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                        rotateVideo() returns error 0x%x", err);
-                    return err;
-                }
-                /* Set the new video size for temporary buffer */
-                M4VSS3GPP_intSetYUV420Plane(pResizePlane,
-                    pClipCtxt->m_pPreResizeFrame[0].u_width,
-                    pClipCtxt->m_pPreResizeFrame[0].u_height);
-            }
-        }
-
-        if (bIsClip1 == M4OSA_TRUE) {
-            pC->bIssecondClip = M4OSA_FALSE;
-            numEffects = pC->nbActiveEffects;
-        } else {
-            numEffects = pC->nbActiveEffects1;
-            pC->bIssecondClip = M4OSA_TRUE;
-        }
-
-        if ( numEffects > 0) {
-            pClipCtxt->bGetYuvDataFromDecoder = M4OSA_TRUE;
-            /* If video frame need to be resized or rotated,
-             * then apply the overlay after the frame was rendered with rendering mode.
-             * Here skip the framing(overlay) effect when applying video Effect. */
-            bSkipFramingEffect = M4OSA_TRUE;
-            err = M4VSS3GPP_intApplyVideoEffect(pC,
-                      pClipCtxt->m_pPreResizeFrame, pResizePlane, bSkipFramingEffect);
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                    M4VSS3GPP_intApplyVideoEffect() err 0x%x", err);
-                return err;
-            }
-            pDecoderRenderFrame= pResizePlane;
-        } else {
-            pDecoderRenderFrame = pClipCtxt->m_pPreResizeFrame;
-        }
-        /* Do rendering mode */
-        if ((pClipCtxt->bGetYuvDataFromDecoder == M4OSA_TRUE) ||
-            (pClipCtxt->pSettings->FileType !=
-             M4VIDEOEDITING_kFileType_ARGB8888)) {
-            if (bIsClip1 == M4OSA_TRUE) {
-                if (pC->bClip1ActiveFramingEffect == M4OSA_TRUE) {
-                    err = M4VSS3GPP_intAllocateYUV420(pTemp,
-                            pPlaneOut[0].u_width, pPlaneOut[0].u_height);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intAllocateYUV420 error 0x%x", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pTmp = pTemp;
-                } else {
-                    pTmp = pC->yuv1;
-                }
-                err = M4VSS3GPP_intApplyRenderingMode (pC,
-                        pClipCtxt->pSettings->xVSS.MediaRendering,
-                        pDecoderRenderFrame,pTmp);
-            } else {
-                if (pC->bClip2ActiveFramingEffect == M4OSA_TRUE) {
-                    err = M4VSS3GPP_intAllocateYUV420(pTemp,
-                            pPlaneOut[0].u_width, pPlaneOut[0].u_height);
-                    if (M4NO_ERROR != err) {
-                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                            M4VSS3GPP_intAllocateYUV420 error 0x%x", err);
-                        pC->ewc.VppError = err;
-                        return M4NO_ERROR;
-                    }
-                    pTmp = pTemp;
-                } else {
-                    pTmp = pC->yuv2;
-                }
-                err = M4VSS3GPP_intApplyRenderingMode (pC,
-                        pClipCtxt->pSettings->xVSS.MediaRendering,
-                        pDecoderRenderFrame,pTmp);
-            }
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                    M4VSS3GPP_intApplyRenderingMode error 0x%x ", err);
-                for (i=0; i<3; i++) {
-                    if (pTemp[i].pac_data != M4OSA_NULL) {
-                        free(pTemp[i].pac_data);
-                        pTemp[i].pac_data = M4OSA_NULL;
-                    }
-                }
-                return err;
-            }
-            /* Apply overlay if overlay exist*/
-            if (bIsClip1 == M4OSA_TRUE) {
-                if (pC->bClip1ActiveFramingEffect == M4OSA_TRUE) {
-                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
-                        pTemp, pC->yuv1);
-                }
-                pClipCtxt->lastDecodedPlane = pC->yuv1;
-            } else {
-                if (pC->bClip2ActiveFramingEffect == M4OSA_TRUE) {
-                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
-                        pTemp, pC->yuv2);
-                }
-                pClipCtxt->lastDecodedPlane = pC->yuv2;
-            }
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
-                    M4VSS3GPP_intApplyVideoOverlay) error 0x%x ", err);
-                pC->ewc.VppError = err;
-                for (i=0; i<3; i++) {
-                    if (pTemp[i].pac_data != M4OSA_NULL) {
-                        free(pTemp[i].pac_data);
-                        pTemp[i].pac_data = M4OSA_NULL;
-                    }
-                }
-                return M4NO_ERROR;
-            }
-        } else {
-            pClipCtxt->lastDecodedPlane = pClipCtxt->pPlaneYuvWithEffect;
-        }
-        // free the temp buffer
-        for (i=0; i<3; i++) {
-            if (pTemp[i].pac_data != M4OSA_NULL) {
-                free(pTemp[i].pac_data);
-                pTemp[i].pac_data = M4OSA_NULL;
-            }
-        }
-
-        if ((pClipCtxt->pSettings->FileType ==
-                 M4VIDEOEDITING_kFileType_ARGB8888) &&
-             (pC->nbActiveEffects == 0) &&
-             (pClipCtxt->bGetYuvDataFromDecoder == M4OSA_TRUE)) {
-            if (bIsClip1 == M4OSA_TRUE) {
-                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                    pClipCtxt->pViDecCtxt,
-                    M4DECODER_kOptionID_YuvWithEffectNonContiguous,
-                    (M4OSA_DataOption)pC->yuv1);
-            } else {
-                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
-                    pClipCtxt->pViDecCtxt,
-                    M4DECODER_kOptionID_YuvWithEffectNonContiguous,
-                    (M4OSA_DataOption)pC->yuv2);
-            }
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                    null decoder setOption error 0x%x ", err);
-                return err;
-            }
-            pClipCtxt->bGetYuvDataFromDecoder = M4OSA_FALSE;
-        }
-
-        // Reset original width and height for resize frame plane
-        if (0 != pClipCtxt->pSettings->ClipProperties.videoRotationDegrees &&
-            180 != pClipCtxt->pSettings->ClipProperties.videoRotationDegrees) {
-
-            M4VSS3GPP_intSetYUV420Plane(pClipCtxt->m_pPreResizeFrame,
-                                        yuvFrameWidth, yuvFrameHeight);
-        }
-
-    } else {
-        /* No rotate or no resize case*/
-        if (bIsClip1 == M4OSA_TRUE) {
-            numEffects = pC->nbActiveEffects;
-        } else {
-            numEffects = pC->nbActiveEffects1;
-        }
-
-        if(numEffects > 0) {
-            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                      pClipCtxt->pViDecCtxt, &ts, pPlaneNoResize, M4OSA_TRUE);
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                    Render returns error 0x%x", err);
-                return err;
-            }
-
-            bSkipFramingEffect = M4OSA_FALSE;
-            if (bIsClip1 == M4OSA_TRUE) {
-                pC->bIssecondClip = M4OSA_FALSE;
-                err = M4VSS3GPP_intApplyVideoEffect(pC, pPlaneNoResize,
-                            pC->yuv1, bSkipFramingEffect);
-                pClipCtxt->lastDecodedPlane = pC->yuv1;
-            } else {
-                pC->bIssecondClip = M4OSA_TRUE;
-                err = M4VSS3GPP_intApplyVideoEffect(pC, pPlaneNoResize,
-                            pC->yuv2, bSkipFramingEffect);
-                pClipCtxt->lastDecodedPlane = pC->yuv2;
-            }
-
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                    M4VSS3GPP_intApplyVideoEffect error 0x%x", err);
-                return err;
-            }
-        } else {
-
-            if (bIsClip1 == M4OSA_TRUE) {
-                pTmp = pC->yuv1;
-            } else {
-                pTmp = pC->yuv2;
-            }
-            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
-                      pClipCtxt->pViDecCtxt, &ts, pTmp, M4OSA_TRUE);
-            if (M4NO_ERROR != err) {
-                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
-                    Render returns error 0x%x,", err);
-                return err;
-            }
-            pClipCtxt->lastDecodedPlane = pTmp;
-        }
-        pClipCtxt->iVideoRenderCts = (M4OSA_Int32)ts;
-    }
-
-    return err;
-}
-
-M4OSA_ERR M4VSS3GPP_intRotateVideo(M4VIFI_ImagePlane* pPlaneIn,
-                                   M4OSA_UInt32 rotationDegree) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4VIFI_ImagePlane outPlane[3];
-
-    if (rotationDegree != 180) {
-        // Swap width and height of in plane
-        outPlane[0].u_width = pPlaneIn[0].u_height;
-        outPlane[0].u_height = pPlaneIn[0].u_width;
-        outPlane[0].u_stride = outPlane[0].u_width;
-        outPlane[0].u_topleft = 0;
-        outPlane[0].pac_data = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
-            (outPlane[0].u_stride*outPlane[0].u_height), M4VS,
-            (M4OSA_Char*)("out Y plane for rotation"));
-        if (outPlane[0].pac_data == M4OSA_NULL) {
-            return M4ERR_ALLOC;
-        }
-
-        outPlane[1].u_width = pPlaneIn[0].u_height/2;
-        outPlane[1].u_height = pPlaneIn[0].u_width/2;
-        outPlane[1].u_stride = outPlane[1].u_width;
-        outPlane[1].u_topleft = 0;
-        outPlane[1].pac_data = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
-            (outPlane[1].u_stride*outPlane[1].u_height), M4VS,
-            (M4OSA_Char*)("out U plane for rotation"));
-        if (outPlane[1].pac_data == M4OSA_NULL) {
-            free((void *)outPlane[0].pac_data);
-            return M4ERR_ALLOC;
-        }
-
-        outPlane[2].u_width = pPlaneIn[0].u_height/2;
-        outPlane[2].u_height = pPlaneIn[0].u_width/2;
-        outPlane[2].u_stride = outPlane[2].u_width;
-        outPlane[2].u_topleft = 0;
-        outPlane[2].pac_data = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
-            (outPlane[2].u_stride*outPlane[2].u_height), M4VS,
-            (M4OSA_Char*)("out V plane for rotation"));
-        if (outPlane[2].pac_data == M4OSA_NULL) {
-            free((void *)outPlane[0].pac_data);
-            free((void *)outPlane[1].pac_data);
-            return M4ERR_ALLOC;
-        }
-    }
-
-    switch(rotationDegree) {
-        case 90:
-            M4VIFI_Rotate90RightYUV420toYUV420(M4OSA_NULL, pPlaneIn, outPlane);
-            break;
-
-        case 180:
-            // In plane rotation, so planeOut = planeIn
-            M4VIFI_Rotate180YUV420toYUV420(M4OSA_NULL, pPlaneIn, pPlaneIn);
-            break;
-
-        case 270:
-            M4VIFI_Rotate90LeftYUV420toYUV420(M4OSA_NULL, pPlaneIn, outPlane);
-            break;
-
-        default:
-            M4OSA_TRACE1_1("invalid rotation param %d", (int)rotationDegree);
-            err = M4ERR_PARAMETER;
-            break;
-    }
-
-    if (rotationDegree != 180) {
-        memset((void *)pPlaneIn[0].pac_data, 0,
-            (pPlaneIn[0].u_width*pPlaneIn[0].u_height));
-        memset((void *)pPlaneIn[1].pac_data, 0,
-            (pPlaneIn[1].u_width*pPlaneIn[1].u_height));
-        memset((void *)pPlaneIn[2].pac_data, 0,
-            (pPlaneIn[2].u_width*pPlaneIn[2].u_height));
-        // Copy Y, U and V planes
-        memcpy((void *)pPlaneIn[0].pac_data, (void *)outPlane[0].pac_data,
-            (pPlaneIn[0].u_width*pPlaneIn[0].u_height));
-        memcpy((void *)pPlaneIn[1].pac_data, (void *)outPlane[1].pac_data,
-            (pPlaneIn[1].u_width*pPlaneIn[1].u_height));
-        memcpy((void *)pPlaneIn[2].pac_data, (void *)outPlane[2].pac_data,
-            (pPlaneIn[2].u_width*pPlaneIn[2].u_height));
-
-        free((void *)outPlane[0].pac_data);
-        free((void *)outPlane[1].pac_data);
-        free((void *)outPlane[2].pac_data);
-
-        // Swap the width and height of the in plane
-        uint32_t temp = 0;
-        temp = pPlaneIn[0].u_width;
-        pPlaneIn[0].u_width = pPlaneIn[0].u_height;
-        pPlaneIn[0].u_height = temp;
-        pPlaneIn[0].u_stride = pPlaneIn[0].u_width;
-
-        temp = pPlaneIn[1].u_width;
-        pPlaneIn[1].u_width = pPlaneIn[1].u_height;
-        pPlaneIn[1].u_height = temp;
-        pPlaneIn[1].u_stride = pPlaneIn[1].u_width;
-
-        temp = pPlaneIn[2].u_width;
-        pPlaneIn[2].u_width = pPlaneIn[2].u_height;
-        pPlaneIn[2].u_height = temp;
-        pPlaneIn[2].u_stride = pPlaneIn[2].u_width;
-    }
-
-    return err;
-}
-
-M4OSA_ERR M4VSS3GPP_intSetYUV420Plane(M4VIFI_ImagePlane* planeIn,
-                                      M4OSA_UInt32 width, M4OSA_UInt32 height) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    if (planeIn == M4OSA_NULL) {
-        M4OSA_TRACE1_0("NULL in plane, error");
-        return M4ERR_PARAMETER;
-    }
-
-    planeIn[0].u_width = width;
-    planeIn[0].u_height = height;
-    planeIn[0].u_stride = planeIn[0].u_width;
-
-    planeIn[1].u_width = width/2;
-    planeIn[1].u_height = height/2;
-    planeIn[1].u_stride = planeIn[1].u_width;
-
-    planeIn[2].u_width = width/2;
-    planeIn[2].u_height = height/2;
-    planeIn[2].u_stride = planeIn[1].u_width;
-
-    return err;
-}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
deleted file mode 100755
index f30f705..0000000
--- a/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ************************************************************************
- * @file    M4VSS3GPP_MediaAndCodecSubscription.c
- * @brief    Media readers and codecs subscription
- * @note    This file implements the subscription of supported media
- *            readers and decoders for the VSS. Potential support can
- *            be activated or de-activated
- *            using compilation flags set in the projects settings.
- *************************************************************************
- */
-
-#include "NXPSW_CompilerSwitches.h"
-
-
-#include "M4OSA_Debug.h"
-#include "M4VSS3GPP_InternalTypes.h"                /**< Include for VSS specific types */
-#include "M4VSS3GPP_InternalFunctions.h"            /**< Registration module */
-
-/* _______________________ */
-/*|                       |*/
-/*|  reader subscription  |*/
-/*|_______________________|*/
-
-/* Reader registration : at least one reader must be defined */
-#ifndef M4VSS_SUPPORT_READER_3GP
-#ifndef M4VSS_SUPPORT_READER_AMR
-#ifndef M4VSS_SUPPORT_READER_MP3
-#ifndef M4VSS_SUPPORT_READER_PCM
-#ifndef M4VSS_SUPPORT_AUDEC_NULL
-#error "no reader registered"
-#endif /* M4VSS_SUPPORT_AUDEC_NULL */
-#endif /* M4VSS_SUPPORT_READER_PCM */
-#endif /* M4VSS_SUPPORT_READER_MP3 */
-#endif /* M4VSS_SUPPORT_READER_AMR */
-#endif /* M4VSS_SUPPORT_READER_3GP */
-
-/* There must be at least one MPEG4 decoder */
-#if !defined(M4VSS_SUPPORT_VIDEC_3GP) && !defined(M4VSS_ENABLE_EXTERNAL_DECODERS)
-#error "Wait, what?"
-/* "Hey, this is the VSS3GPP speaking. Pray tell, how the heck do you expect me to be able to do
-any editing without a built-in video decoder, nor the possibility to receive an external one?!
-Seriously, I'd love to know." */
-#endif
-
-/* Include files for each reader to subscribe */
-#ifdef M4VSS_SUPPORT_READER_3GP
-#include "VideoEditor3gpReader.h"
-#endif
-#ifdef M4VSS_SUPPORT_READER_AMR
-#include "M4READER_Amr.h"
-#endif
-#ifdef M4VSS_SUPPORT_READER_MP3
-#include "VideoEditorMp3Reader.h"
-#endif
-#ifdef M4VSS_SUPPORT_READER_PCM
-#include "M4READER_Pcm.h"
-#endif
-
-
-/* ______________________________ */
-/*|                              |*/
-/*|  audio decoder subscription  |*/
-/*|______________________________|*/
-
-#include "VideoEditorAudioDecoder.h"
-#include "VideoEditorVideoDecoder.h"
-#include "M4DECODER_Null.h"
-#ifdef M4VSS_SUPPORT_AUDEC_NULL
-#include "M4AD_Null.h"
-#endif
-
-/* _______________________ */
-/*|                       |*/
-/*|  writer subscription  |*/
-/*|_______________________|*/
-
-/* Writer registration : at least one writer must be defined */
-//#ifndef M4VSS_SUPPORT_WRITER_AMR
-#ifndef M4VSS_SUPPORT_WRITER_3GPP
-#error "no writer registered"
-#endif /* M4VSS_SUPPORT_WRITER_3GPP */
-//#endif /* M4VSS_SUPPORT_WRITER_AMR */
-
-/* Include files for each writer to subscribe */
-//#ifdef M4VSS_SUPPORT_WRITER_AMR
-/*extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
-M4WRITER_GlobalInterface** SrcGlobalInterface,
-M4WRITER_DataInterface** SrcDataInterface);*/
-//#endif
-#ifdef M4VSS_SUPPORT_WRITER_3GPP
-extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
-                                            M4WRITER_GlobalInterface** SrcGlobalInterface,
-                                            M4WRITER_DataInterface** SrcDataInterface);
-#endif
-
-/* ______________________________ */
-/*|                              |*/
-/*|  video encoder subscription  |*/
-/*|______________________________|*/
-#include "VideoEditorAudioEncoder.h"
-#include "VideoEditorVideoEncoder.h"
-
-
-/* ______________________________ */
-/*|                              |*/
-/*|  audio encoder subscription  |*/
-/*|______________________________|*/
-
-
-#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL)\
-    return ((M4OSA_ERR)(retval));
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_SubscribeMediaAndCodec()
- * @brief    This function registers the reader, decoders, writers and encoders
- *          in the VSS.
- * @note
- * @param    pContext:    (IN) Execution context.
- * @return    M4NO_ERROR: there is no error
- * @return    M4ERR_PARAMETER    pContext is NULL
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext)
-{
-    M4OSA_ERR                   err = M4NO_ERROR;
-
-    M4READER_MediaType          readerMediaType;
-    M4READER_GlobalInterface*   pReaderGlobalInterface;
-    M4READER_DataInterface*     pReaderDataInterface;
-
-    M4WRITER_OutputFileType     writerMediaType;
-    M4WRITER_GlobalInterface*   pWriterGlobalInterface;
-    M4WRITER_DataInterface*     pWriterDataInterface;
-
-    M4AD_Type                   audioDecoderType;
-    M4ENCODER_AudioFormat       audioCodecType;
-    M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
-    M4AD_Interface*             pAudioDecoderInterface;
-
-    M4DECODER_VideoType         videoDecoderType;
-    M4ENCODER_Format            videoCodecType;
-    M4ENCODER_GlobalInterface*  pVideoCodecInterface;
-    M4DECODER_VideoInterface*   pVideoDecoderInterface;
-
-    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
-
-    /* _______________________ */
-    /*|                       |*/
-    /*|  reader subscription  |*/
-    /*|_______________________|*/
-
-    /* --- 3GP --- */
-
-#ifdef M4VSS_SUPPORT_READER_3GP
-    err = VideoEditor3gpReader_getInterface( &readerMediaType, &pReaderGlobalInterface,
-         &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
-        pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GP reader");
-#endif /* M4VSS_SUPPORT_READER_3GP */
-
-    /* --- AMR --- */
-
-#ifdef M4VSS_SUPPORT_READER_AMR
-    err = M4READER_AMR_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
-        &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
-        pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register AMR reader");
-#endif /* M4VSS_SUPPORT_READER_AMR */
-
-    /* --- MP3 --- */
-
-#ifdef M4VSS_SUPPORT_READER_MP3
-    err = VideoEditorMp3Reader_getInterface( &readerMediaType, &pReaderGlobalInterface,
-         &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
-        pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 reader");
-#endif /* M4VSS_SUPPORT_READER_MP3 */
-
-    /* --- PCM --- */
-
-#ifdef M4VSS_SUPPORT_READER_PCM
-    err = M4READER_PCM_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
-        &pReaderDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4READER_PCM interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
-        pReaderDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register PCM reader");
-#endif /* M4VSS_SUPPORT_READER_PCM */
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  video decoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- MPEG4 & H263 --- */
-
-#ifdef M4VSS_SUPPORT_VIDEC_3GP
-    err = VideoEditorVideoDecoder_getInterface_MPEG4(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register MPEG4 decoder");
-#endif /* M4VSS_SUPPORT_VIDEC_3GP */
-
-#ifdef M4VSS_SUPPORT_VIDEO_AVC
-    err = VideoEditorVideoDecoder_getInterface_H264(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4DECODER_H264 interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register H264 decoder");
-#endif /* M4VSS_SUPPORT_VIDEC_3GP */
-
-#ifdef M4VSS_SUPPORT_VIDEC_NULL
-    err = M4DECODER_NULL_getInterface(
-              &videoDecoderType, &pVideoDecoderInterface);
-    if (M4NO_ERROR != err) {
-        M4OSA_TRACE1_0("M4VD NULL Decoder interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerVideoDecoder(
-              pContext, videoDecoderType, pVideoDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err, "M4VSS3GPP_subscribeMediaAndCodec: \
-        can't register video NULL decoder");
-#endif
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  audio decoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- AMRNB --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
-    err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType, &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4 AMRNB interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register AMRNB decoder");
-#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
-
-    /* --- AAC --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_AAC
-    err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType, &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4 AAC interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register AAC decoder");
-#endif /* M4VSS_SUPPORT_AUDEC_AAC */
-
-    /* --- MP3 --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_MP3
-    err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType, &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4 MP3 interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 decoder");
-#endif  /* M4VSS_SUPPORT_AUDEC_MP3 */
-
-
-    /* --- NULL --- */
-
-#ifdef M4VSS_SUPPORT_AUDEC_NULL
-    err = M4AD_NULL_getInterface( &audioDecoderType, &pAudioDecoderInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AD NULL Decoder interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register EVRC decoder");
-#endif  /* M4VSS_SUPPORT_AUDEC_NULL */
-
-    /* _______________________ */
-    /*|                       |*/
-    /*|  writer subscription  |*/
-    /*|_______________________|*/
-
-
-    /* --- 3GPP --- */
-
-#ifdef M4VSS_SUPPORT_WRITER_3GPP
-    /* retrieves the 3GPP writer media type and pointer to functions*/
-    err = M4WRITER_3GP_getInterfaces( &writerMediaType, &pWriterGlobalInterface,
-        &pWriterDataInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerWriter( pContext, writerMediaType, pWriterGlobalInterface,
-        pWriterDataInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GPP writer");
-#endif /* M4VSS_SUPPORT_WRITER_3GPP */
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  video encoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- MPEG4 --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-    /* retrieves the MPEG4 encoder type and pointer to functions*/
-    err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType, &pVideoCodecInterface,
-         M4ENCODER_OPEN_ADVANCED);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register video MPEG4 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
-
-    /* --- H263 --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
-    /* retrieves the H263 encoder type and pointer to functions*/
-    err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType, &pVideoCodecInterface,
-         M4ENCODER_OPEN_ADVANCED);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register video H263 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
-
-#ifdef M4VSS_SUPPORT_ENCODER_AVC
-    /* retrieves the H264 encoder type and pointer to functions*/
-    err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType, &pVideoCodecInterface,
-         M4ENCODER_OPEN_ADVANCED);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4VSS3GPP_subscribeMediaAndCodec: M4H264E interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register video H264 encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_AVC */
-
-    /* ______________________________ */
-    /*|                              |*/
-    /*|  audio encoder subscription  |*/
-    /*|______________________________|*/
-
-    /* --- AMR --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_AMR
-    /* retrieves the AMR encoder type and pointer to functions*/
-    err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType, &pAudioCodecInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AMR interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AMR encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_AMR */
-
-    /* --- AAC --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_AAC
-    /* retrieves the AAC encoder type and pointer to functions*/
-    err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType, &pAudioCodecInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4AAC interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AAC encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_AAC */
-
-    /* --- EVRC --- */
-
-#ifdef M4VSS_SUPPORT_ENCODER_EVRC
-    /* retrieves the EVRC encoder type and pointer to functions*/
-    err = M4EVRC_getInterfaces( &audioCodecType, &pAudioCodecInterface);
-    if (M4NO_ERROR != err)
-    {
-        M4OSA_TRACE1_0("M4EVRC interface allocation error");
-        return err;
-    }
-    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
-    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
-        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio EVRC encoder");
-#endif /* M4VSS_SUPPORT_ENCODER_EVRC */
-
-#ifdef M4VSS_SUPPORT_OMX_CODECS
-    pContext->bAllowFreeingOMXCodecInterface = M4OSA_TRUE;   /* when NXP SW codecs are registered,
-                                                               then allow unregistration*/
-#endif
-
-
-    return err;
-}
-
diff --git a/libvideoeditor/vss/src/M4xVSS_API.c b/libvideoeditor/vss/src/M4xVSS_API.c
deleted file mode 100755
index 9f5410b..0000000
--- a/libvideoeditor/vss/src/M4xVSS_API.c
+++ /dev/null
@@ -1,6367 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4xVSS_API.c
- * @brief    API of eXtended Video Studio Service (Video Studio 2.1)
- * @note
- ******************************************************************************
- */
-
-/**
- * OSAL main types and errors ***/
-#include "M4OSA_Types.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_FileReader.h"
-#include "M4OSA_FileWriter.h"
-#include "M4OSA_CoreID.h"
-#include "M4OSA_CharStar.h"
-// StageFright encoders require %16 resolution
-#include "M4ENCODER_common.h"
-#include "M4DECODER_Common.h"
-#include "VideoEditorVideoDecoder.h"
-
-/**
- * VSS 3GPP API definition */
-#include "M4VSS3GPP_ErrorCodes.h"
-
-/*************************
-Begin of xVSS API
- **************************/
-
-#include "M4xVSS_API.h"
-#include "M4xVSS_Internal.h"
-
-/* RC: to delete unecessary temp files on the fly */
-#include "M4VSS3GPP_InternalTypes.h"
-#include <utils/Log.h>
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* pParams)
- * @brief        This function initializes the xVSS
- * @note        Initializes the xVSS edit operation (allocates an execution context).
- *
- * @param    pContext            (OUT) Pointer on the xVSS edit context to allocate
- * @param    params                (IN) Parameters mandatory for xVSS
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- ******************************************************************************
- */
-
-M4OSA_ERR M4xVSS_Init( M4OSA_Context *pContext, M4xVSS_InitParams *pParams )
-{
-    M4xVSS_Context *xVSS_context;
-    M4OSA_UInt32 length = 0, i;
-
-    if( pParams == M4OSA_NULL )
-    {
-        M4OSA_TRACE1_0("Parameter structure for M4xVSS_Init function is NULL");
-        return M4ERR_PARAMETER;
-    }
-
-    if( pParams->pFileReadPtr == M4OSA_NULL
-        || pParams->pFileWritePtr == M4OSA_NULL )
-    {
-        M4OSA_TRACE1_0(
-            "pFileReadPtr or pFileWritePtr in M4xVSS_InitParams structure is NULL");
-        return M4ERR_PARAMETER;
-    }
-
-    xVSS_context = (M4xVSS_Context *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_Context), M4VS,
-        (M4OSA_Char *)"Context of the xVSS layer");
-
-    if( xVSS_context == M4OSA_NULL )
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
-        return M4ERR_ALLOC;
-    }
-
-    /* Initialize file read/write functions pointers */
-    xVSS_context->pFileReadPtr = pParams->pFileReadPtr;
-    xVSS_context->pFileWritePtr = pParams->pFileWritePtr;
-
-    /*UTF Conversion support: copy conversion functions pointers and allocate the temporary
-     buffer*/
-    if( pParams->pConvFromUTF8Fct != M4OSA_NULL )
-    {
-        if( pParams->pConvToUTF8Fct != M4OSA_NULL )
-        {
-            xVSS_context->UTFConversionContext.pConvFromUTF8Fct =
-                pParams->pConvFromUTF8Fct;
-            xVSS_context->UTFConversionContext.pConvToUTF8Fct =
-                pParams->pConvToUTF8Fct;
-            xVSS_context->UTFConversionContext.m_TempOutConversionSize =
-                UTF_CONVERSION_BUFFER_SIZE;
-            xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
-                (M4OSA_Void *)M4OSA_32bitAlignedMalloc(UTF_CONVERSION_BUFFER_SIZE
-                * sizeof(M4OSA_UInt8),
-                M4VA, (M4OSA_Char *)"M4xVSS_Init: UTF conversion buffer");
-
-            if( M4OSA_NULL
-                == xVSS_context->UTFConversionContext.pTempOutConversionBuffer )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
-                free(xVSS_context->pTempPath);
-                xVSS_context->pTempPath = M4OSA_NULL;
-                free(xVSS_context);
-                xVSS_context = M4OSA_NULL;
-                return M4ERR_ALLOC;
-            }
-        }
-        else
-        {
-            M4OSA_TRACE1_0("M4xVSS_Init: one UTF conversion pointer is null and the other\
-                           is not null");
-            free(xVSS_context->pTempPath);
-            xVSS_context->pTempPath = M4OSA_NULL;
-            free(xVSS_context);
-            xVSS_context = M4OSA_NULL;
-            return M4ERR_PARAMETER;
-        }
-    }
-    else
-    {
-        xVSS_context->UTFConversionContext.pConvFromUTF8Fct = M4OSA_NULL;
-        xVSS_context->UTFConversionContext.pConvToUTF8Fct = M4OSA_NULL;
-        xVSS_context->UTFConversionContext.m_TempOutConversionSize = 0;
-        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
-            M4OSA_NULL;
-    }
-
-    if( pParams->pTempPath != M4OSA_NULL )
-    {
-        /*No need to convert into UTF8 as all input of xVSS are in UTF8
-        (the conversion customer format into UTF8
-        is done in VA/VAL)*/
-        xVSS_context->pTempPath =
-            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(strlen(pParams->pTempPath) + 1,
-            M4VS, (M4OSA_Char *)"xVSS Path for temporary files");
-
-        if( xVSS_context->pTempPath == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pTempPath, (void *)pParams->pTempPath,
-            strlen(pParams->pTempPath) + 1);
-        /* TODO: Check that no previous xVSS temporary files are present ? */
-    }
-    else
-    {
-        M4OSA_TRACE1_0("Path for temporary files is NULL");
-        free(xVSS_context);
-        xVSS_context = M4OSA_NULL;
-        return M4ERR_PARAMETER;
-    }
-
-    xVSS_context->pSettings =
-        (M4VSS3GPP_EditSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_EditSettings),
-        M4VS, (M4OSA_Char *)"Copy of VSS structure");
-
-    if( xVSS_context->pSettings == M4OSA_NULL )
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
-        free(xVSS_context->pTempPath);
-        xVSS_context->pTempPath = M4OSA_NULL;
-        free(xVSS_context);
-        xVSS_context = M4OSA_NULL;
-        return M4ERR_ALLOC;
-    }
-
-    /* Initialize pointers in pSettings */
-    xVSS_context->pSettings->pClipList = M4OSA_NULL;
-    xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
-    xVSS_context->pSettings->Effects = M4OSA_NULL; /* RC */
-    xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
-
-    /* This is used to know if the user has added or removed some medias */
-    xVSS_context->previousClipNumber = 0;
-
-    /* "State machine" */
-    xVSS_context->editingStep = 0;
-    xVSS_context->analyseStep = 0;
-
-    xVSS_context->pcmPreviewFile = M4OSA_NULL;
-
-    /* Initialize Pto3GPP and MCS lists */
-    xVSS_context->pMCSparamsList = M4OSA_NULL;
-    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
-    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
-    xVSS_context->pMCScurrentParams = M4OSA_NULL;
-
-    xVSS_context->tempFileIndex = 0;
-
-    xVSS_context->targetedBitrate = 0;
-
-    xVSS_context->targetedTimescale = 0;
-
-    xVSS_context->pAudioMixContext = M4OSA_NULL;
-    xVSS_context->pAudioMixSettings = M4OSA_NULL;
-
-    /*FB: initialize to avoid crash when error during the editing*/
-    xVSS_context->pCurrentEditSettings = M4OSA_NULL;
-
-    /* Initialize state if all initializations are corrects */
-    xVSS_context->m_state = M4xVSS_kStateInitialized;
-
-    /* initialize MCS context*/
-    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
-
-    *pContext = xVSS_context;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_ReduceTranscode
- * @brief        This function changes the given editing structure in order to
- *                minimize the transcoding time.
- * @note        The xVSS analyses this structure, and if needed, changes the
- *                output parameters (Video codec, video size, audio codec,
- *                audio nb of channels) to minimize the transcoding time.
- *
- * @param    pContext            (OUT) Pointer on the xVSS edit context to allocate
- * @param    pSettings            (IN) Edition settings (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_ReduceTranscode( M4OSA_Context pContext,
-                                 M4VSS3GPP_EditSettings *pSettings )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4VIDEOEDITING_ClipProperties fileProperties;
-    M4OSA_UInt8 i, j;
-    M4OSA_Bool bAudioTransition = M4OSA_FALSE;
-    M4OSA_Bool bIsBGMReplace = M4OSA_FALSE;
-    M4OSA_Bool bFound;
-    M4OSA_UInt32 videoConfig[9] =
-    {
-        0, 0, 0, 0, 0, 0, 0, 0, 0
-    };
-    /** Index <-> Video config **/
-    /* 0:        H263  SQCIF        */
-    /* 1:        H263  QCIF        */
-    /* 2:        H263  CIF        */
-    /* 3:        MPEG4 SQCIF        */
-    /* 4:        MPEG4 QQVGA        */
-    /* 5:        MPEG4 QCIF        */
-    /* 6:        MPEG4 QVGA        */
-    /* 7:        MPEG4 CIF        */
-    /* 8:        MPEG4 VGA        */
-    /****************************/
-    M4OSA_UInt32 audioConfig[3] =
-    {
-        0, 0, 0
-    };
-    /** Index <-> Audio config **/
-    /* 0:    AMR                    */
-    /* 1:    AAC    16kHz mono        */
-    /* 2:    AAC 16kHz stereo    */
-    /****************************/
-
-    /* Check state */
-    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
-        && xVSS_context->m_state != M4xVSS_kStateOpened )
-    {
-        M4OSA_TRACE1_1(
-            "Bad state when calling M4xVSS_ReduceTranscode function! State is %d",
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    /* Check number of clips */
-    if( pSettings->uiClipNumber == 0 )
-    {
-        M4OSA_TRACE1_0("The number of input clip must be greater than 0 !");
-        return M4ERR_PARAMETER;
-    }
-
-    /* Check if there is a background music, and if its audio will replace input clip audio */
-    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-    {
-        if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100 )
-        {
-            bIsBGMReplace = M4OSA_TRUE;
-        }
-    }
-
-    /* Parse all clips, and give occurences of each combination */
-    for ( i = 0; i < pSettings->uiClipNumber; i++ )
-    {
-        /* We ignore JPG input files as they are always transcoded */
-        if( pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP )
-        {
-            /**
-            * UTF conversion: convert into the customer format*/
-            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
-            M4OSA_UInt32 ConvertedSize = 0;
-
-            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                != M4OSA_NULL && xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer
-                != M4OSA_NULL )
-            {
-                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                    (M4OSA_Void *)pSettings->pClipList[i]->pFile,
-                    (M4OSA_Void *)xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer,
-                    &ConvertedSize);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1("M4xVSS_ReduceTranscode:\
-                                   M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
-                    return err;
-                }
-                pDecodedPath =
-                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-            }
-            /**
-            * End of the utf conversion, now use the converted path*/
-            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
-                &fileProperties);
-
-            //err = M4xVSS_internalGetProperties(xVSS_context, pSettings->pClipList[i]->pFile,
-            //     &fileProperties);
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
-                    err);
-                /* TODO: Translate error code of MCS to an xVSS error code ? */
-                return err;
-            }
-
-            /* Check best video settings */
-            if( fileProperties.uiVideoWidth == 128
-                && fileProperties.uiVideoHeight == 96 )
-            {
-                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
-                {
-                    videoConfig[0] += fileProperties.uiClipVideoDuration;
-                }
-                else if( ( fileProperties.VideoStreamType
-                    == M4VIDEOEDITING_kMPEG4) \
-                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
-                {
-                    videoConfig[3] += fileProperties.uiClipVideoDuration;
-                }
-            }
-            else if( fileProperties.uiVideoWidth == 160
-                && fileProperties.uiVideoHeight == 120 )
-            {
-                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
-                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
-                {
-                    videoConfig[4] += fileProperties.uiClipVideoDuration;
-                }
-            }
-            else if( fileProperties.uiVideoWidth == 176
-                && fileProperties.uiVideoHeight == 144 )
-            {
-                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
-                {
-                    videoConfig[1] += fileProperties.uiClipVideoDuration;
-                }
-                else if( ( fileProperties.VideoStreamType
-                    == M4VIDEOEDITING_kMPEG4) \
-                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
-                {
-                    videoConfig[5] += fileProperties.uiClipVideoDuration;
-                }
-            }
-            else if( fileProperties.uiVideoWidth == 320
-                && fileProperties.uiVideoHeight == 240 )
-            {
-                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
-                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
-                {
-                    videoConfig[6] += fileProperties.uiClipVideoDuration;
-                }
-            }
-            else if( fileProperties.uiVideoWidth == 352
-                && fileProperties.uiVideoHeight == 288 )
-            {
-                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
-                {
-                    videoConfig[2] += fileProperties.uiClipVideoDuration;
-                }
-                else if( ( fileProperties.VideoStreamType
-                    == M4VIDEOEDITING_kMPEG4) \
-                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
-                {
-                    videoConfig[7] += fileProperties.uiClipVideoDuration;
-                }
-            }
-            else if( fileProperties.uiVideoWidth == 640
-                && fileProperties.uiVideoHeight == 480 )
-            {
-                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
-                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
-                {
-                    videoConfig[8] += fileProperties.uiClipVideoDuration;
-                }
-            }
-
-            /* If there is a BGM that replaces existing audio track, we do not care about
-            audio track as it will be replaced */
-            /* If not, we try to minimize audio reencoding */
-            if( bIsBGMReplace == M4OSA_FALSE )
-            {
-                if( fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC )
-                {
-                    if( fileProperties.uiSamplingFrequency == 16000 && \
-                        fileProperties.uiNbChannels == 1 )
-                    {
-                        audioConfig[1] += fileProperties.uiClipAudioDuration;
-                    }
-                    else if( fileProperties.uiSamplingFrequency == 16000 && \
-                        fileProperties.uiNbChannels == 2 )
-                    {
-                        audioConfig[2] += fileProperties.uiClipAudioDuration;
-                    }
-                }
-                else if( fileProperties.AudioStreamType
-                    == M4VIDEOEDITING_kAMR_NB )
-                {
-                    audioConfig[0] += fileProperties.uiClipAudioDuration;
-                }
-            }
-        }
-    }
-
-    /* Find best output video format (the most occuring combination) */
-    j = 0;
-    bFound = M4OSA_FALSE;
-
-    for ( i = 0; i < 9; i++ )
-    {
-        if( videoConfig[i] >= videoConfig[j] )
-        {
-            j = i;
-            bFound = M4OSA_TRUE;
-        }
-    }
-
-    if( bFound )
-    {
-        switch( j )
-        {
-            case 0:
-                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
-                break;
-
-            case 1:
-                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
-                break;
-
-            case 2:
-                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
-                break;
-
-            case 3:
-                pSettings->xVSS.outputVideoFormat =
-                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
-                break;
-
-            case 4:
-                pSettings->xVSS.outputVideoFormat =
-                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQQVGA;
-                break;
-
-            case 5:
-                pSettings->xVSS.outputVideoFormat =
-                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
-                break;
-
-            case 6:
-                pSettings->xVSS.outputVideoFormat =
-                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQVGA;
-                break;
-
-            case 7:
-                pSettings->xVSS.outputVideoFormat =
-                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
-                break;
-
-            case 8:
-                pSettings->xVSS.outputVideoFormat =
-                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
-                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kVGA;
-                break;
-        }
-    }
-
-    /* Find best output audio format (the most occuring combination) */
-    j = 0;
-    bFound = M4OSA_FALSE;
-
-    for ( i = 0; i < 3; i++ )
-    {
-        if( audioConfig[i] >= audioConfig[j] )
-        {
-            j = i;
-            bFound = M4OSA_TRUE;
-        }
-    }
-
-    if( bFound )
-    {
-        switch( j )
-        {
-            case 0:
-                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAMR_NB;
-                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
-                break;
-
-            case 1:
-                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
-                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
-                break;
-
-            case 2:
-                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
-                pSettings->xVSS.bAudioMono = M4OSA_FALSE;
-                break;
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext,
- *                                         M4VSS3GPP_EditSettings* pSettings)
- * @brief        This function gives to the xVSS an editing structure
- * @note        The xVSS analyses this structure, and prepare edition
- *                This function must be called after M4xVSS_Init, after
- *                M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
- *                After this function, the user must call M4xVSS_Step until
- *                it returns another error than M4NO_ERROR.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    pSettings            (IN) Edition settings (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_SendCommand( M4OSA_Context pContext,
-                             M4VSS3GPP_EditSettings *pSettings )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_UInt8 i, j;
-    M4OSA_UInt8 nbOfSameClip = 0;
-    M4OSA_ERR err;
-    M4OSA_Bool isNewBGM = M4OSA_TRUE;
-    M4xVSS_Pto3GPP_params *pPto3GPP_last = M4OSA_NULL;
-    M4xVSS_MCS_params *pMCS_last = M4OSA_NULL;
-    M4OSA_UInt32 width, height, samplingFreq;
-    M4OSA_Bool bIsTranscoding = M4OSA_FALSE;
-    M4OSA_Int32 totalDuration;
-    M4OSA_UInt32 outputSamplingFrequency = 0;
-    M4OSA_UInt32 length = 0;
-    M4OSA_Int8 masterClip = -1;
-
-    i = 0;
-    /* Check state */
-    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
-        && xVSS_context->m_state != M4xVSS_kStateOpened )
-    {
-        M4OSA_TRACE1_1(
-            "Bad state when calling M4xVSS_SendCommand function! State is %d",
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    /* State is back to initialized to allow call of cleanup function in case of error */
-    xVSS_context->m_state = M4xVSS_kStateInitialized;
-
-    /* Check if a previous sendCommand has been called */
-    if( xVSS_context->previousClipNumber != 0 )
-    {
-        M4OSA_UInt32 pCmpResult = 0;
-
-        /* Compare BGM input */
-        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL \
-            && pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-        {
-            pCmpResult = strcmp((const char *)xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
-                (const char *)pSettings->xVSS.pBGMtrack->pFile);
-
-            if( pCmpResult == 0 )
-            {
-                /* Check if audio output parameters have changed */
-                if( xVSS_context->pSettings->xVSS.outputAudioFormat ==
-                    pSettings->xVSS.outputAudioFormat
-                    && xVSS_context->pSettings->xVSS.bAudioMono
-                    == pSettings->xVSS.bAudioMono )
-                {
-                    /* It means that BGM is the same as before, so, no need to redecode it */
-                    M4OSA_TRACE2_0(
-                        "BGM is the same as before, nothing to decode");
-                    isNewBGM = M4OSA_FALSE;
-                }
-                else
-                {
-                    /* We need to unallocate PCM preview file path in internal context */
-                    if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
-                    {
-                        free(xVSS_context->pcmPreviewFile);
-                        xVSS_context->pcmPreviewFile = M4OSA_NULL;
-                    }
-                }
-            }
-            else
-            {
-                /* We need to unallocate PCM preview file path in internal context */
-                if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pcmPreviewFile);
-                    xVSS_context->pcmPreviewFile = M4OSA_NULL;
-                }
-            }
-        }
-
-        /* Check if output settings have changed */
-        if( xVSS_context->pSettings->xVSS.outputVideoSize
-            != pSettings->xVSS.outputVideoSize
-            || xVSS_context->pSettings->xVSS.outputVideoFormat
-            != pSettings->xVSS.outputVideoFormat
-            || xVSS_context->pSettings->xVSS.outputVideoProfile
-            != pSettings->xVSS.outputVideoProfile
-            || xVSS_context->pSettings->xVSS.outputVideoLevel
-            != pSettings->xVSS.outputVideoLevel
-            || xVSS_context->pSettings->xVSS.outputAudioFormat
-            != pSettings->xVSS.outputAudioFormat
-            || xVSS_context->pSettings->xVSS.bAudioMono
-            != pSettings->xVSS.bAudioMono
-            || xVSS_context->pSettings->xVSS.outputAudioSamplFreq
-            != pSettings->xVSS.outputAudioSamplFreq )
-        {
-            /* If it is the case, we can't reuse already transcoded/converted files */
-            /* so, we delete these files and remove them from chained list */
-            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
-            {
-                M4xVSS_Pto3GPP_params *pParams =
-                    xVSS_context->pPTo3GPPparamsList;
-                M4xVSS_Pto3GPP_params *pParams_sauv;
-
-                while( pParams != M4OSA_NULL )
-                {
-                    if( pParams->pFileIn != M4OSA_NULL )
-                    {
-                        free(pParams->pFileIn);
-                        pParams->pFileIn = M4OSA_NULL;
-                    }
-
-                    if( pParams->pFileOut != M4OSA_NULL )
-                    {
-                        /* Delete temporary file */
-                        remove((const char *)pParams->pFileOut);
-                        free(pParams->pFileOut);
-                        pParams->pFileOut = M4OSA_NULL;
-                    }
-
-                    if( pParams->pFileTemp != M4OSA_NULL )
-                    {
-                        /* Delete temporary file */
-#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-
-                        remove((const char *)pParams->pFileTemp);
-                        free(pParams->pFileTemp);
-
-#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-
-                        pParams->pFileTemp = M4OSA_NULL;
-                    }
-                    pParams_sauv = pParams;
-                    pParams = pParams->pNext;
-                    free(pParams_sauv);
-                    pParams_sauv = M4OSA_NULL;
-                }
-                xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
-            }
-
-            if( xVSS_context->pMCSparamsList != M4OSA_NULL )
-            {
-                M4xVSS_MCS_params *pParams = xVSS_context->pMCSparamsList;
-                M4xVSS_MCS_params *pParams_sauv;
-                M4xVSS_MCS_params *pParams_bgm = M4OSA_NULL;
-
-                while( pParams != M4OSA_NULL )
-                {
-                    /* Here, we do not delete BGM */
-                    if( pParams->isBGM != M4OSA_TRUE )
-                    {
-                        if( pParams->pFileIn != M4OSA_NULL )
-                        {
-                            free(pParams->pFileIn);
-                            pParams->pFileIn = M4OSA_NULL;
-                        }
-
-                        if( pParams->pFileOut != M4OSA_NULL )
-                        {
-                            /* Delete temporary file */
-                            remove((const char *)pParams->pFileOut);
-                            free(pParams->pFileOut);
-                            pParams->pFileOut = M4OSA_NULL;
-                        }
-
-                        if( pParams->pFileTemp != M4OSA_NULL )
-                        {
-                            /* Delete temporary file */
-#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-
-                            remove((const char *)pParams->pFileTemp);
-                            free(pParams->pFileTemp);
-
-#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-
-                            pParams->pFileTemp = M4OSA_NULL;
-                        }
-                        pParams_sauv = pParams;
-                        pParams = pParams->pNext;
-                        free(pParams_sauv);
-                        pParams_sauv = M4OSA_NULL;
-                    }
-                    else
-                    {
-                        pParams_bgm = pParams;
-                        pParams = pParams->pNext;
-                        /*PR P4ME00003182 initialize this pointer because the following params
-                        element will be deallocated*/
-                        if( pParams != M4OSA_NULL
-                            && pParams->isBGM != M4OSA_TRUE )
-                        {
-                            pParams_bgm->pNext = M4OSA_NULL;
-                        }
-                    }
-                }
-                xVSS_context->pMCSparamsList = pParams_bgm;
-            }
-            /* Maybe need to implement framerate changing */
-            //xVSS_context->pSettings->videoFrameRate;
-        }
-
-        /* Unallocate previous xVSS_context->pSettings structure */
-        M4xVSS_freeSettings(xVSS_context->pSettings);
-
-        /*Unallocate output file path*/
-        if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
-        {
-            free(xVSS_context->pSettings->pOutputFile);
-            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
-        }
-        xVSS_context->pSettings->uiOutputPathSize = 0;
-        xVSS_context->pOutputFile = M4OSA_NULL;
-    }
-
-    /**********************************
-    Clips registering
-    **********************************/
-
-    /* Copy settings from user given structure to our "local" structure */
-    xVSS_context->pSettings->xVSS.outputVideoFormat =
-        pSettings->xVSS.outputVideoFormat;
-    xVSS_context->pSettings->xVSS.outputVideoProfile =
-        pSettings->xVSS.outputVideoProfile;
-    xVSS_context->pSettings->xVSS.outputVideoLevel =
-        pSettings->xVSS.outputVideoLevel;
-    xVSS_context->pSettings->xVSS.outputVideoSize =
-        pSettings->xVSS.outputVideoSize;
-    xVSS_context->pSettings->xVSS.outputAudioFormat =
-        pSettings->xVSS.outputAudioFormat;
-    xVSS_context->pSettings->xVSS.bAudioMono = pSettings->xVSS.bAudioMono;
-    xVSS_context->pSettings->xVSS.outputAudioSamplFreq =
-        pSettings->xVSS.outputAudioSamplFreq;
-    /*xVSS_context->pSettings->pOutputFile = pSettings->pOutputFile;*/
-    /*FB: VAL CR P4ME00003076
-    The output video and audio bitrate are given by the user in the edition settings structure*/
-    xVSS_context->pSettings->xVSS.outputVideoBitrate =
-        pSettings->xVSS.outputVideoBitrate;
-    xVSS_context->pSettings->xVSS.outputAudioBitrate =
-        pSettings->xVSS.outputAudioBitrate;
-    xVSS_context->pSettings->PTVolLevel = pSettings->PTVolLevel;
-
-    /*FB: bug fix if the output path is given in M4xVSS_sendCommand*/
-
-    if( pSettings->pOutputFile != M4OSA_NULL
-        && pSettings->uiOutputPathSize > 0 )
-    {
-        M4OSA_Void *pDecodedPath = pSettings->pOutputFile;
-        /*As all inputs of the xVSS are in UTF8, convert the output file path into the
-        customer format*/
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)pSettings->pOutputFile,
-                (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1("M4xVSS_SendCommand:\
-                               M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-            pSettings->uiOutputPathSize = length;
-        }
-
-        xVSS_context->pSettings->pOutputFile = (M4OSA_Void *)M4OSA_32bitAlignedMalloc \
-            (pSettings->uiOutputPathSize + 1, M4VS,
-            (M4OSA_Char *)"output file path");
-
-        if( xVSS_context->pSettings->pOutputFile == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pSettings->pOutputFile,
-            (void *)pDecodedPath, pSettings->uiOutputPathSize + 1);
-        xVSS_context->pSettings->uiOutputPathSize = pSettings->uiOutputPathSize;
-        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
-    }
-    else
-    {
-        xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
-        xVSS_context->pSettings->uiOutputPathSize = 0;
-        xVSS_context->pOutputFile = M4OSA_NULL;
-    }
-    xVSS_context->pSettings->pTemporaryFile = pSettings->pTemporaryFile;
-    xVSS_context->pSettings->uiClipNumber = pSettings->uiClipNumber;
-    xVSS_context->pSettings->videoFrameRate = pSettings->videoFrameRate;
-    xVSS_context->pSettings->uiMasterClip =
-        0; /* With VSS 2.0, this new param is mandatory */
-    xVSS_context->pSettings->xVSS.pTextRenderingFct =
-        pSettings->xVSS.pTextRenderingFct; /* CR text handling */
-    xVSS_context->pSettings->xVSS.outputFileSize =
-        pSettings->xVSS.outputFileSize;
-
-    if( pSettings->xVSS.outputFileSize != 0 \
-        && pSettings->xVSS.outputAudioFormat != M4VIDEOEDITING_kAMR_NB )
-    {
-        M4OSA_TRACE1_0("M4xVSS_SendCommand: Impossible to limit file\
-                       size with other audio output than AAC");
-        return M4ERR_PARAMETER;
-    }
-    xVSS_context->nbStepTotal = 0;
-    xVSS_context->currentStep = 0;
-
-    if( xVSS_context->pSettings->xVSS.outputVideoFormat != M4VIDEOEDITING_kMPEG4
-        && xVSS_context->pSettings->xVSS.outputVideoFormat
-        != M4VIDEOEDITING_kH263
-        && xVSS_context->pSettings->xVSS.outputVideoFormat
-        != M4VIDEOEDITING_kH264 )
-    {
-        xVSS_context->pSettings->xVSS.outputVideoFormat =
-            M4VIDEOEDITING_kNoneVideo;
-    }
-
-    /* Get output width/height */
-    switch( xVSS_context->pSettings->xVSS.outputVideoSize )
-    {
-        case M4VIDEOEDITING_kSQCIF:
-            width = 128;
-            height = 96;
-            break;
-
-        case M4VIDEOEDITING_kQQVGA:
-            width = 160;
-            height = 120;
-            break;
-
-        case M4VIDEOEDITING_kQCIF:
-            width = 176;
-            height = 144;
-            break;
-
-        case M4VIDEOEDITING_kQVGA:
-            width = 320;
-            height = 240;
-            break;
-
-        case M4VIDEOEDITING_kCIF:
-            width = 352;
-            height = 288;
-            break;
-
-        case M4VIDEOEDITING_kVGA:
-            width = 640;
-            height = 480;
-            break;
-            /* +PR LV5807 */
-        case M4VIDEOEDITING_kWVGA:
-            width = 800;
-            height = 480;
-            break;
-
-        case M4VIDEOEDITING_kNTSC:
-            width = 720;
-            height = 480;
-            break;
-            /* -PR LV5807 */
-            /* +CR Google */
-        case M4VIDEOEDITING_k640_360:
-            width = 640;
-            height = 360;
-            break;
-
-        case M4VIDEOEDITING_k854_480:
-
-            // StageFright encoders require %16 resolution
-
-            width = M4ENCODER_854_480_Width;
-
-            height = 480;
-            break;
-
-        case M4VIDEOEDITING_k1280_720:
-            width = 1280;
-            height = 720;
-            break;
-
-        case M4VIDEOEDITING_k1080_720:
-            // StageFright encoders require %16 resolution
-            width = M4ENCODER_1080_720_Width;
-            height = 720;
-            break;
-
-        case M4VIDEOEDITING_k960_720:
-            width = 960;
-            height = 720;
-            break;
-
-        case M4VIDEOEDITING_k1920_1080:
-            width = 1920;
-            height = M4ENCODER_1920_1080_Height;
-            break;
-
-            /* -CR Google */
-        default: /* If output video size is not given, we take QCIF size */
-            width = 176;
-            height = 144;
-            xVSS_context->pSettings->xVSS.outputVideoSize =
-                M4VIDEOEDITING_kQCIF;
-            break;
-    }
-
-    /* Get output Sampling frequency */
-    switch( xVSS_context->pSettings->xVSS.outputAudioSamplFreq )
-    {
-        case M4VIDEOEDITING_k8000_ASF:
-            samplingFreq = 8000;
-            break;
-
-        case M4VIDEOEDITING_k16000_ASF:
-            samplingFreq = 16000;
-            break;
-
-        case M4VIDEOEDITING_k22050_ASF:
-            samplingFreq = 22050;
-            break;
-
-        case M4VIDEOEDITING_k24000_ASF:
-            samplingFreq = 24000;
-            break;
-
-        case M4VIDEOEDITING_k32000_ASF:
-            samplingFreq = 32000;
-            break;
-
-        case M4VIDEOEDITING_k44100_ASF:
-            samplingFreq = 44100;
-            break;
-
-        case M4VIDEOEDITING_k48000_ASF:
-            samplingFreq = 48000;
-            break;
-
-        case M4VIDEOEDITING_kDefault_ASF:
-        default:
-            if( xVSS_context->pSettings->xVSS.outputAudioFormat
-                == M4VIDEOEDITING_kAMR_NB )
-            {
-                samplingFreq = 8000;
-            }
-            else if( xVSS_context->pSettings->xVSS.outputAudioFormat
-                == M4VIDEOEDITING_kAAC )
-            {
-                samplingFreq = 16000;
-            }
-            else
-            {
-                samplingFreq = 0;
-            }
-            break;
-    }
-
-    /* Allocate clip/transitions if clip number is not null ... */
-    if( 0 < xVSS_context->pSettings->uiClipNumber )
-    {
-        if( xVSS_context->pSettings->pClipList != M4OSA_NULL )
-        {
-            free((xVSS_context->pSettings->pClipList));
-            xVSS_context->pSettings->pClipList = M4OSA_NULL;
-        }
-
-        if( xVSS_context->pSettings->pTransitionList != M4OSA_NULL )
-        {
-            free(xVSS_context->pSettings->pTransitionList);
-            xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
-        }
-
-        xVSS_context->pSettings->pClipList =
-            (M4VSS3GPP_ClipSettings ** )M4OSA_32bitAlignedMalloc \
-            (sizeof(M4VSS3GPP_ClipSettings *)*xVSS_context->pSettings->uiClipNumber,
-            M4VS, (M4OSA_Char *)"xVSS, copy of pClipList");
-
-        if( xVSS_context->pSettings->pClipList == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return M4ERR_ALLOC;
-        }
-        /* Set clip list to NULL */
-        memset((void *)xVSS_context->pSettings->pClipList,0,
-            sizeof(M4VSS3GPP_ClipSettings *)
-            *xVSS_context->pSettings->uiClipNumber);
-
-        if( xVSS_context->pSettings->uiClipNumber > 1 )
-        {
-            xVSS_context->pSettings->pTransitionList =
-                (M4VSS3GPP_TransitionSettings ** ) \
-                M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings *)                \
-                *(xVSS_context->pSettings->uiClipNumber - 1), M4VS, (M4OSA_Char *) \
-                "xVSS, copy of pTransitionList");
-
-            if( xVSS_context->pSettings->pTransitionList == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /*FB: to avoid leaks when there is an error in the send command*/
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                /**/
-                return M4ERR_ALLOC;
-            }
-            /* Set transition list to NULL */
-            memset(
-                (void *)xVSS_context->pSettings->pTransitionList,0,
-                sizeof(M4VSS3GPP_TransitionSettings *)
-                *(xVSS_context->pSettings->uiClipNumber - 1));
-        }
-        else
-        {
-            xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
-        }
-    }
-    /* else, there is a pb in the input settings structure */
-    else
-    {
-        M4OSA_TRACE1_0("No clip in this settings list !!");
-        /*FB: to avoid leaks when there is an error in the send command*/
-        /* Free Send command */
-        M4xVSS_freeCommand(xVSS_context);
-        /**/
-        return M4ERR_PARAMETER;
-    }
-
-    /* RC Allocate effects settings */
-    xVSS_context->pSettings->nbEffects = pSettings->nbEffects;
-
-    if( 0 < xVSS_context->pSettings->nbEffects )
-    {
-        xVSS_context->pSettings->Effects =
-            (M4VSS3GPP_EffectSettings *)M4OSA_32bitAlignedMalloc \
-            (xVSS_context->pSettings->nbEffects * sizeof(M4VSS3GPP_EffectSettings),
-            M4VS, (M4OSA_Char *)"effects settings");
-
-        if( xVSS_context->pSettings->Effects == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return M4ERR_ALLOC;
-        }
-        /*FB bug fix 19.03.2008: these pointers were not initialized -> crash when free*/
-        for ( i = 0; i < xVSS_context->pSettings->nbEffects; i++ )
-        {
-            xVSS_context->pSettings->Effects[i].xVSS.pFramingFilePath =
-                M4OSA_NULL;
-            xVSS_context->pSettings->Effects[i].xVSS.pFramingBuffer =
-                M4OSA_NULL;
-            xVSS_context->pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
-        }
-        /**/
-    }
-
-    if( xVSS_context->targetedTimescale == 0 )
-    {
-        M4OSA_UInt32 pTargetedTimeScale = 0;
-
-        err = M4xVSS_internalGetTargetedTimeScale(xVSS_context, pSettings,
-            &pTargetedTimeScale);
-
-        if( M4NO_ERROR != err || pTargetedTimeScale == 0 )
-        {
-            M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalGetTargetedTimeScale\
-                           returned 0x%x", err);
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return err;
-        }
-        xVSS_context->targetedTimescale = pTargetedTimeScale;
-    }
-
-    /* Initialize total duration variable */
-    totalDuration = 0;
-
-    /* Parsing list of clips given by application, and prepare analyzing */
-    for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
-    {
-        /* Allocate current clip */
-        xVSS_context->pSettings->pClipList[i] =
-            (M4VSS3GPP_ClipSettings *)M4OSA_32bitAlignedMalloc \
-            (sizeof(M4VSS3GPP_ClipSettings), M4VS, (M4OSA_Char *)"clip settings");
-
-        if( xVSS_context->pSettings->pClipList[i] == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return M4ERR_ALLOC;
-        }
-
-        /* Copy clip settings from given structure to our xVSS_context structure */
-        err =
-            M4xVSS_DuplicateClipSettings(xVSS_context->pSettings->pClipList[i],
-            pSettings->pClipList[i], M4OSA_TRUE);
-
-        if( err != M4NO_ERROR )
-        {
-            M4OSA_TRACE1_1(
-                "M4xVSS_SendCommand: M4xVSS_DuplicateClipSettings return error 0x%x",
-                err);
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return err;
-        }
-
-        xVSS_context->pSettings->pClipList[i]->bTranscodingRequired =
-            M4OSA_FALSE;
-
-        /* Because there is 1 less transition than clip number */
-        if( i < xVSS_context->pSettings->uiClipNumber - 1 )
-        {
-            xVSS_context->pSettings->pTransitionList[i] =
-                (M4VSS3GPP_TransitionSettings
-                *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings),
-                M4VS, (M4OSA_Char *)"transition settings");
-
-            if( xVSS_context->pSettings->pTransitionList[i] == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /*FB: to avoid leaks when there is an error in the send command*/
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                /**/
-                return M4ERR_ALLOC;
-            }
-
-            memcpy(
-                (void *)xVSS_context->pSettings->pTransitionList[i],
-                (void *)pSettings->pTransitionList[i],
-                sizeof(M4VSS3GPP_TransitionSettings));
-            /* Initialize external effect context to NULL, to know if input jpg has already been
-            decoded or not */
-            xVSS_context->pSettings->pTransitionList[i]->
-                pExtVideoTransitionFctCtxt = M4OSA_NULL;
-
-            switch( xVSS_context->pSettings->
-                pTransitionList[i]->VideoTransitionType )
-            {
-                    /* If transition type is alpha magic, we need to decode input file */
-                case M4xVSS_kVideoTransitionType_AlphaMagic:
-                    /* Allocate our alpha magic settings structure to have a copy of the
-                    provided one */
-                    xVSS_context->pSettings->pTransitionList[i]->      \
-                     xVSS.transitionSpecific.pAlphaMagicSettings =
-                        (M4xVSS_AlphaMagicSettings *)M4OSA_32bitAlignedMalloc \
-                        (sizeof(M4xVSS_AlphaMagicSettings), M4VS,
-                        (M4OSA_Char *)"Input Alpha magic settings structure");
-
-                    if( xVSS_context->pSettings->pTransitionList[i]-> \
-                        xVSS.transitionSpecific.pAlphaMagicSettings == M4OSA_NULL )
-                    {
-                        M4OSA_TRACE1_0(
-                            "Allocation error in M4xVSS_SendCommand");
-                        /*FB: to avoid leaks when there is an error in the send command*/
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        /**/
-                        return M4ERR_ALLOC;
-                    }
-                    /* Copy data from the provided alpha magic settings structure tou our
-                    structure */
-                    memcpy((void *)xVSS_context->pSettings->
-                        pTransitionList[i]-> \
-                        xVSS.transitionSpecific.pAlphaMagicSettings,
-                        (void *)pSettings->pTransitionList[i]-> \
-                        xVSS.transitionSpecific.pAlphaMagicSettings,
-                        sizeof(M4xVSS_AlphaMagicSettings));
-
-                    /* Allocate our alpha magic input filename */
-                    xVSS_context->pSettings->pTransitionList[i]-> \
-                        xVSS.transitionSpecific.pAlphaMagicSettings->
-                        pAlphaFilePath = M4OSA_32bitAlignedMalloc(
-                        (strlen(pSettings->pTransitionList[i]-> \
-                        xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath)
-                        + 1), M4VS, (M4OSA_Char *)"Alpha magic file path");
-
-                    if( xVSS_context->pSettings->pTransitionList[i]-> \
-                        xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath
-                        == M4OSA_NULL )
-                    {
-                        M4OSA_TRACE1_0(
-                            "Allocation error in M4xVSS_SendCommand");
-                        /*FB: to avoid leaks when there is an error in the send command*/
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        /**/
-                        return M4ERR_ALLOC;
-                    }
-                    /* Copy data from the provided alpha magic filename to our */
-                    M4OSA_chrNCopy(
-                        xVSS_context->pSettings->pTransitionList[i]->xVSS.
-                        transitionSpecific.pAlphaMagicSettings->
-                        pAlphaFilePath,
-                        pSettings->pTransitionList[i]->xVSS.
-                        transitionSpecific.pAlphaMagicSettings->
-                        pAlphaFilePath, strlen(
-                        pSettings->pTransitionList[i]->xVSS.
-                        transitionSpecific.pAlphaMagicSettings->
-                        pAlphaFilePath) + 1);
-
-                    /* Parse all transition to know if the input jpg has already been decoded */
-                    for ( j = 0; j < i; j++ )
-                    {
-                        if( xVSS_context->pSettings->
-                            pTransitionList[j]->VideoTransitionType
-                            == M4xVSS_kVideoTransitionType_AlphaMagic )
-                        {
-                            M4OSA_UInt32 pCmpResult = 0;
-                            pCmpResult = strcmp((const char *)xVSS_context->pSettings->
-                                pTransitionList[i]->xVSS.
-                                transitionSpecific.pAlphaMagicSettings->
-                                pAlphaFilePath, (const char *)xVSS_context->pSettings->
-                                pTransitionList[j]->xVSS.
-                                transitionSpecific.
-                                pAlphaMagicSettings->pAlphaFilePath);
-
-                            if( pCmpResult == 0 )
-                            {
-                                M4xVSS_internal_AlphaMagicSettings
-                                    *alphaSettings;
-
-                                alphaSettings =
-                                    (M4xVSS_internal_AlphaMagicSettings
-                                    *)M4OSA_32bitAlignedMalloc(
-                                    sizeof(
-                                    M4xVSS_internal_AlphaMagicSettings),
-                                    M4VS,
-                                    (M4OSA_Char
-                                    *)
-                                    "Alpha magic settings structure 1");
-
-                                if( alphaSettings == M4OSA_NULL )
-                                {
-                                    M4OSA_TRACE1_0(
-                                        "Allocation error in M4xVSS_SendCommand");
-                                    /*FB: to avoid leaks when there is an error in the send
-                                     command*/
-                                    /* Free Send command */
-                                    M4xVSS_freeCommand(xVSS_context);
-                                    /**/
-                                    return M4ERR_ALLOC;
-                                }
-                                alphaSettings->pPlane =
-                                    ((M4xVSS_internal_AlphaMagicSettings *)(
-                                    xVSS_context->pSettings->
-                                    pTransitionList[j]->
-                                    pExtVideoTransitionFctCtxt))->
-                                    pPlane;
-
-                                if( xVSS_context->pSettings->
-                                    pTransitionList[i]->xVSS.transitionSpecific.
-                                    pAlphaMagicSettings->blendingPercent > 0
-                                    && xVSS_context->pSettings->
-                                    pTransitionList[i]->xVSS.
-                                    transitionSpecific.
-                                    pAlphaMagicSettings->blendingPercent
-                                    <= 100 )
-                                {
-                                    alphaSettings->blendingthreshold =
-                                        ( xVSS_context->pSettings->
-                                        pTransitionList[i]->xVSS.
-                                        transitionSpecific.
-                                        pAlphaMagicSettings->
-                                        blendingPercent) * 255 / 200;
-                                }
-                                else
-                                {
-                                    alphaSettings->blendingthreshold = 0;
-                                }
-                                alphaSettings->isreverse =
-                                    xVSS_context->pSettings->
-                                    pTransitionList[i]->xVSS.
-                                    transitionSpecific.
-                                    pAlphaMagicSettings->isreverse;
-                                /* It means that the input jpg file for alpha magic has already
-                                 been decoded -> no nedd to decode it again */
-                                if( alphaSettings->blendingthreshold == 0 )
-                                {
-                                    xVSS_context->pSettings->
-                                        pTransitionList[i]->
-                                        ExtVideoTransitionFct =
-                                        M4xVSS_AlphaMagic;
-                                }
-                                else
-                                {
-                                    xVSS_context->pSettings->
-                                        pTransitionList[i]->
-                                        ExtVideoTransitionFct =
-                                        M4xVSS_AlphaMagicBlending;
-                                }
-                                xVSS_context->pSettings->pTransitionList[i]->
-                                    pExtVideoTransitionFctCtxt = alphaSettings;
-                                break;
-                            }
-                        }
-                    }
-
-                    /* If the jpg has not been decoded yet ... */
-                    if( xVSS_context->pSettings->
-                        pTransitionList[i]->pExtVideoTransitionFctCtxt
-                        == M4OSA_NULL )
-                    {
-                        M4VIFI_ImagePlane *outputPlane;
-                        M4xVSS_internal_AlphaMagicSettings *alphaSettings;
-                        /*UTF conversion support*/
-                        M4OSA_Void *pDecodedPath = M4OSA_NULL;
-
-                        /*To support ARGB8888 : get the width and height */
-                        M4OSA_UInt32 width_ARGB888 =
-                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
-                            transitionSpecific.pAlphaMagicSettings->width;
-                        M4OSA_UInt32 height_ARGB888 =
-                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
-                            transitionSpecific.pAlphaMagicSettings->height;
-                        M4OSA_TRACE1_1(
-                            " TransitionListM4xVSS_SendCommand width State is %d",
-                            width_ARGB888);
-                        M4OSA_TRACE1_1(
-                            " TransitionListM4xVSS_SendCommand height! State is %d",
-                            height_ARGB888);
-                        /* Allocate output plane */
-                        outputPlane = (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(3
-                            * sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char
-                            *)
-                            "Output plane for Alpha magic transition");
-
-                        if( outputPlane == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_0(
-                                "Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-
-                        outputPlane[0].u_width = width;
-                        outputPlane[0].u_height = height;
-                        outputPlane[0].u_topleft = 0;
-                        outputPlane[0].u_stride = width;
-                        outputPlane[0].pac_data = (M4VIFI_UInt8
-                            *)M4OSA_32bitAlignedMalloc(( width * height * 3)
-                            >> 1,
-                            M4VS,
-                            (M4OSA_Char
-                            *)
-                            "Alloc for the Alpha magic pac_data output YUV");
-                        ;
-
-                        if( outputPlane[0].pac_data == M4OSA_NULL )
-                        {
-                            free(outputPlane);
-                            outputPlane = M4OSA_NULL;
-                            M4OSA_TRACE1_0(
-                                "Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        outputPlane[1].u_width = width >> 1;
-                        outputPlane[1].u_height = height >> 1;
-                        outputPlane[1].u_topleft = 0;
-                        outputPlane[1].u_stride = width >> 1;
-                        outputPlane[1].pac_data = outputPlane[0].pac_data
-                            + outputPlane[0].u_width * outputPlane[0].u_height;
-                        outputPlane[2].u_width = width >> 1;
-                        outputPlane[2].u_height = height >> 1;
-                        outputPlane[2].u_topleft = 0;
-                        outputPlane[2].u_stride = width >> 1;
-                        outputPlane[2].pac_data = outputPlane[1].pac_data
-                            + outputPlane[1].u_width * outputPlane[1].u_height;
-
-                        pDecodedPath =
-                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
-                            transitionSpecific.pAlphaMagicSettings->
-                            pAlphaFilePath;
-                        /**
-                        * UTF conversion: convert into the customer format, before being used*/
-                        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                            != M4OSA_NULL && xVSS_context->
-                            UTFConversionContext.
-                            pTempOutConversionBuffer != M4OSA_NULL )
-                        {
-                            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                                (M4OSA_Void *)xVSS_context->pSettings->
-                                pTransitionList[i]->xVSS.
-                                transitionSpecific.
-                                pAlphaMagicSettings->pAlphaFilePath,
-                                (M4OSA_Void *)xVSS_context->
-                                UTFConversionContext.
-                                pTempOutConversionBuffer, &length);
-
-                            if( err != M4NO_ERROR )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
-                                    err);
-                                /* Free Send command */
-                                M4xVSS_freeCommand(xVSS_context);
-                                return err;
-                            }
-                            pDecodedPath =
-                                xVSS_context->UTFConversionContext.
-                                pTempOutConversionBuffer;
-                        }
-                        /**
-                        End of the conversion, use the decoded path*/
-                        /*To support ARGB8888 : convert + resizing from ARGB8888 to yuv420 */
-
-                        err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(
-                            pDecodedPath,
-                            xVSS_context->pFileReadPtr, outputPlane,
-                            width_ARGB888, height_ARGB888);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            free(outputPlane[0].pac_data);
-                            outputPlane[0].pac_data = M4OSA_NULL;
-                            free(outputPlane);
-                            outputPlane = M4OSA_NULL;
-                            M4xVSS_freeCommand(xVSS_context);
-                            M4OSA_TRACE1_1(
-                                "M4xVSS_SendCommand: error when decoding alpha magic JPEG: 0x%x",
-                                err);
-                            return err;
-                        }
-
-                        /* Allocate alpha settings structure */
-                        alphaSettings =
-                            (M4xVSS_internal_AlphaMagicSettings *)M4OSA_32bitAlignedMalloc(
-                            sizeof(M4xVSS_internal_AlphaMagicSettings),
-                            M4VS, (M4OSA_Char
-                            *)"Alpha magic settings structure 2");
-
-                        if( alphaSettings == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_0(
-                                "Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        alphaSettings->pPlane = outputPlane;
-
-                        if( xVSS_context->pSettings->pTransitionList[i]->xVSS.
-                            transitionSpecific.pAlphaMagicSettings->
-                            blendingPercent > 0 && xVSS_context->pSettings->
-                            pTransitionList[i]->xVSS.
-                            transitionSpecific.pAlphaMagicSettings->
-                            blendingPercent <= 100 )
-                        {
-                            alphaSettings->blendingthreshold =
-                                ( xVSS_context->pSettings->
-                                pTransitionList[i]->xVSS.
-                                transitionSpecific.pAlphaMagicSettings->
-                                blendingPercent) * 255 / 200;
-                        }
-                        else
-                        {
-                            alphaSettings->blendingthreshold = 0;
-                        }
-                        alphaSettings->isreverse =
-                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
-                            transitionSpecific.pAlphaMagicSettings->
-                            isreverse;
-
-                        if( alphaSettings->blendingthreshold == 0 )
-                        {
-                            xVSS_context->pSettings->pTransitionList[i]->
-                                ExtVideoTransitionFct = M4xVSS_AlphaMagic;
-                        }
-                        else
-                        {
-                            xVSS_context->pSettings->pTransitionList[i]->
-                                ExtVideoTransitionFct =
-                                M4xVSS_AlphaMagicBlending;
-                        }
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            pExtVideoTransitionFctCtxt = alphaSettings;
-                    }
-
-                    break;
-
-                case M4xVSS_kVideoTransitionType_SlideTransition:
-                    {
-                        M4xVSS_internal_SlideTransitionSettings *slideSettings;
-                        slideSettings =
-                            (M4xVSS_internal_SlideTransitionSettings *)M4OSA_32bitAlignedMalloc(
-                            sizeof(M4xVSS_internal_SlideTransitionSettings),
-                            M4VS, (M4OSA_Char
-                            *)"Internal slide transition settings");
-
-                        if( M4OSA_NULL == slideSettings )
-                        {
-                            M4OSA_TRACE1_0(
-                                "Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        /* Just copy the lone parameter from the input settings to the internal
-                         context. */
-
-                        slideSettings->direction =
-                            pSettings->pTransitionList[i]->xVSS.transitionSpecific.
-                            pSlideTransitionSettings->direction;
-
-                        /* No need to keep our copy of the settings. */
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            xVSS.transitionSpecific.pSlideTransitionSettings =
-                            M4OSA_NULL;
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            ExtVideoTransitionFct = &M4xVSS_SlideTransition;
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            pExtVideoTransitionFctCtxt = slideSettings;
-                    }
-                    break;
-
-                case M4xVSS_kVideoTransitionType_FadeBlack:
-                    {
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            ExtVideoTransitionFct = &M4xVSS_FadeBlackTransition;
-                    }
-                    break;
-
-                case M4xVSS_kVideoTransitionType_External:
-                    {
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            ExtVideoTransitionFct =
-                            pSettings->pTransitionList[i]->ExtVideoTransitionFct;
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            pExtVideoTransitionFctCtxt =
-                            pSettings->pTransitionList[i]->
-                            pExtVideoTransitionFctCtxt;
-                        xVSS_context->pSettings->pTransitionList[i]->
-                            VideoTransitionType =
-                            M4VSS3GPP_kVideoTransitionType_External;
-                    }
-                    break;
-
-                default:
-                    break;
-                } // switch
-
-            /* Update total_duration with transition duration */
-            totalDuration -= xVSS_context->pSettings->
-                pTransitionList[i]->uiTransitionDuration;
-        }
-
-
-        if( xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_ARGB8888 )
-        {
-            if(M4OSA_TRUE ==
-                   xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom) {
-                M4OSA_Char out_img[M4XVSS_MAX_PATH_LEN];
-                M4OSA_Char out_img_tmp[M4XVSS_MAX_PATH_LEN];
-                M4xVSS_Pto3GPP_params *pParams = M4OSA_NULL;
-                M4OSA_Context pARGBFileIn;
-                /*UTF conversion support*/
-                M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
-
-                /* Parse Pto3GPP params chained list to know if input file has already been
-                converted */
-                if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
-                {
-                    M4OSA_UInt32 pCmpResult = 0;
-
-                    pParams = xVSS_context->pPTo3GPPparamsList;
-                    /* We parse all Pto3gpp Param chained list */
-                    while( pParams != M4OSA_NULL )
-                    {
-                        pCmpResult = strcmp((const char *)pSettings->pClipList[i]->pFile,
-                            (const char *)pParams->pFileIn);
-
-                        if( pCmpResult == 0
-                            && (pSettings->pClipList[i]->uiEndCutTime
-                            == pParams->duration
-                            || pSettings->pClipList[i]->xVSS.uiDuration
-                            == pParams->duration)
-                            && pSettings->pClipList[i]->xVSS.MediaRendering
-                            == pParams->MediaRendering )
-
-
-
-                        {
-                            /* Replace JPG filename with existing 3GP filename */
-                            goto replaceARGB_3GP;
-                        }
-                        /* We need to update this variable, in case some pictures have been
-                         added between two */
-                        /* calls to M4xVSS_sendCommand */
-                        pPto3GPP_last = pParams;
-                        pParams = pParams->pNext;
-                    }
-                }
-
-                /* Construct output temporary 3GP filename */
-                err = M4OSA_chrSPrintf(out_img, M4XVSS_MAX_PATH_LEN - 1, (M4OSA_Char *)"%simg%d.3gp",
-                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return err;
-                }
-
-    #ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-
-                err = M4OSA_chrSPrintf(out_img_tmp, M4XVSS_MAX_PATH_LEN - 1, "%simg%d.tmp",
-                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return err;
-                }
-
-    #endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-
-                xVSS_context->tempFileIndex++;
-
-                /* Allocate last element Pto3GPP params structure */
-                pParams = (M4xVSS_Pto3GPP_params
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_Pto3GPP_params),
-                    M4VS, (M4OSA_Char *)"Element of Pto3GPP Params");
-
-                if( pParams == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0(
-                        "M4xVSS_sendCommand: Problem when allocating one element Pto3GPP Params");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-                /* Initializes pfilexxx members of pParams to be able to free them correctly */
-                pParams->pFileIn = M4OSA_NULL;
-                pParams->pFileOut = M4OSA_NULL;
-                pParams->pFileTemp = M4OSA_NULL;
-                pParams->pNext = M4OSA_NULL;
-                pParams->MediaRendering = M4xVSS_kResizing;
-
-                /*To support ARGB8888 :get the width and height */
-                pParams->height = pSettings->pClipList[
-                    i]->ClipProperties.uiStillPicHeight; //ARGB_Height;
-                    pParams->width = pSettings->pClipList[
-                        i]->ClipProperties.uiStillPicWidth; //ARGB_Width;
-                        M4OSA_TRACE3_1("CLIP M4xVSS_SendCommand ARGB8888 H = %d", pParams->height);
-                        M4OSA_TRACE3_1("CLIP M4xVSS_SendCommand ARGB8888 W = %d", pParams->width);
-
-                        if( xVSS_context->pPTo3GPPparamsList
-                            == M4OSA_NULL ) /* Means it is the first element of the list */
-                        {
-                            /* Initialize the xVSS context with the first element of the list */
-                            xVSS_context->pPTo3GPPparamsList = pParams;
-
-                            /* Save this element in case of other file to convert */
-                            pPto3GPP_last = pParams;
-                        }
-                        else
-                        {
-                            /* Update next pointer of the previous last element of the chain */
-                            pPto3GPP_last->pNext = pParams;
-
-                            /* Update save of last element of the chain */
-                            pPto3GPP_last = pParams;
-                        }
-
-                        /* Fill the last M4xVSS_Pto3GPP_params element */
-                        pParams->duration =
-                            xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
-                        /* If duration is filled, let's use it instead of EndCutTime */
-                        if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
-                        {
-                            pParams->duration =
-                                xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
-                        }
-
-                        pParams->InputFileType = M4VIDEOEDITING_kFileType_ARGB8888;
-
-                        /**
-                        * UTF conversion: convert into the customer format, before being used*/
-                        pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
-                        length = strlen(pDecodedPath);
-
-                        /**
-                        * UTF conversion: convert into the customer format, before being used*/
-                        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                            != M4OSA_NULL && xVSS_context->
-                            UTFConversionContext.pTempOutConversionBuffer
-                            != M4OSA_NULL )
-                        {
-                            err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
-                                *)xVSS_context->pSettings->pClipList[i]->pFile,
-                                (M4OSA_Void *)xVSS_context->
-                                UTFConversionContext.pTempOutConversionBuffer,
-                                &length);
-
-                            if( err != M4NO_ERROR )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
-                                    err);
-                                /* Free Send command */
-                                M4xVSS_freeCommand(xVSS_context);
-                                return err;
-                            }
-                            pDecodedPath =
-                                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-                        }
-
-                        /**
-                        * End of the UTF conversion, use the converted file path*/
-                        pParams->pFileIn = (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
-                            (M4OSA_Char *)"Pto3GPP Params: file in");
-
-                        if( pParams->pFileIn == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)pParams->pFileIn, (void *)pDecodedPath,
-                            (length + 1)); /* Copy input file path */
-
-                        /* Check that JPG file is present on the FS (P4ME00002974) by just opening
-                         and closing it */
-                        err =
-                            xVSS_context->pFileReadPtr->openRead(&pARGBFileIn, pDecodedPath,
-                            M4OSA_kFileRead);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_2("Can't open input jpg file %s, error: 0x%x\n",
-                                pDecodedPath, err);
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            return err;
-                        }
-                        err = xVSS_context->pFileReadPtr->closeRead(pARGBFileIn);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_2("Can't close input jpg file %s, error: 0x%x\n",
-                                pDecodedPath, err);
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            return err;
-                        }
-
-                        /**
-                        * UTF conversion: convert into the customer format, before being used*/
-                        pDecodedPath = out_img;
-                        length = strlen(pDecodedPath);
-
-                        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                            != M4OSA_NULL && xVSS_context->
-                            UTFConversionContext.pTempOutConversionBuffer
-                            != M4OSA_NULL )
-                        {
-                            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                                (M4OSA_Void *)out_img, (M4OSA_Void *)xVSS_context->
-                                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-                            if( err != M4NO_ERROR )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
-                                    err);
-                                /* Free Send command */
-                                M4xVSS_freeCommand(xVSS_context);
-                                return err;
-                            }
-                            pDecodedPath =
-                                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-                        }
-
-                        /**
-                        * End of the UTF conversion, use the converted file path*/
-                        pParams->pFileOut = (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-                            (M4OSA_Char *)"Pto3GPP Params: file out");
-
-                        if( pParams->pFileOut == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)pParams->pFileOut, (void *)pDecodedPath,
-                            (length + 1)); /* Copy output file path */
-
-    #ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-                        /**
-                        * UTF conversion: convert into the customer format, before being used*/
-
-                        pDecodedPath = out_img_tmp;
-                        length = strlen(pDecodedPath);
-
-                        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                            != M4OSA_NULL && xVSS_context->
-                            UTFConversionContext.pTempOutConversionBuffer
-                            != M4OSA_NULL )
-                        {
-                            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                                (M4OSA_Void *)out_img_tmp, (M4OSA_Void *)xVSS_context->
-                                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-                            if( err != M4NO_ERROR )
-                            {
-                                M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8\
-                                     returns err: 0x%x",
-                                    err);
-                                /* Free Send command */
-                                M4xVSS_freeCommand(xVSS_context);
-                                return err;
-                            }
-                            pDecodedPath =
-                                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-                        }
-
-                        /**
-                        * End of the UTF conversion, use the converted file path*/
-                        pParams->pFileTemp = (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-                            (M4OSA_Char *)"Pto3GPP Params: file temp");
-
-                        if( pParams->pFileTemp == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)pParams->pFileTemp, (void *)pDecodedPath,
-                            (length + 1)); /* Copy temporary file path */
-
-    #endif                         /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-
-                        /* Fill PanAndZoom settings if needed */
-
-                        if( M4OSA_TRUE
-                            == xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom )
-                        {
-                            pParams->isPanZoom =
-                                xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom;
-                            /* Check that Pan & Zoom parameters are corrects */
-                            if( xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa > 1000
-                                || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa
-                                <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
-                                PanZoomTopleftXa > 1000
-                                || xVSS_context->pSettings->pClipList[i]->xVSS.
-                                PanZoomTopleftYa > 1000
-                                || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
-                                > 1000
-                                || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
-                                <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
-                                PanZoomTopleftXb > 1000
-                                || xVSS_context->pSettings->pClipList[i]->xVSS.
-                                PanZoomTopleftYb > 1000)
-                            {
-                                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                                M4xVSS_freeCommand(xVSS_context);
-                                return M4ERR_PARAMETER;
-                            }
-
-                            pParams->PanZoomXa =
-                                xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa;
-                            pParams->PanZoomTopleftXa =
-                                xVSS_context->pSettings->
-                                pClipList[i]->xVSS.PanZoomTopleftXa;
-                            pParams->PanZoomTopleftYa =
-                                xVSS_context->pSettings->
-                                pClipList[i]->xVSS.PanZoomTopleftYa;
-                            pParams->PanZoomXb =
-                                xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb;
-                            pParams->PanZoomTopleftXb =
-                                xVSS_context->pSettings->
-                                pClipList[i]->xVSS.PanZoomTopleftXb;
-                            pParams->PanZoomTopleftYb =
-                                xVSS_context->pSettings->
-                                pClipList[i]->xVSS.PanZoomTopleftYb;
-                        }
-                        else
-                        {
-                            pParams->isPanZoom = M4OSA_FALSE;
-                        }
-                        /*+ PR No: blrnxpsw#223*/
-                        /*Intializing the Video Frame Rate as it may not be intialized*/
-                        /*Other changes made is @ M4xVSS_Internal.c @ line 1518 in
-                        M4xVSS_internalStartConvertPictureTo3gp*/
-                        switch( xVSS_context->pSettings->videoFrameRate )
-                        {
-                            case M4VIDEOEDITING_k30_FPS:
-                                pParams->framerate = 33;
-                                break;
-
-                            case M4VIDEOEDITING_k25_FPS:
-                                pParams->framerate = 40;
-                                break;
-
-                            case M4VIDEOEDITING_k20_FPS:
-                                pParams->framerate = 50;
-                                break;
-
-                            case M4VIDEOEDITING_k15_FPS:
-                                pParams->framerate = 66;
-                                break;
-
-                            case M4VIDEOEDITING_k12_5_FPS:
-                                pParams->framerate = 80;
-                                break;
-
-                            case M4VIDEOEDITING_k10_FPS:
-                                pParams->framerate = 100;
-                                break;
-
-                            case M4VIDEOEDITING_k7_5_FPS:
-                                pParams->framerate = 133;
-                                break;
-
-                            case M4VIDEOEDITING_k5_FPS:
-                                pParams->framerate = 200;
-                                break;
-
-                            default:
-                                /*Making Default Frame Rate @ 15 FPS*/
-                                pParams->framerate = 66;
-                                break;
-                        }
-                        /*-PR No: blrnxpsw#223*/
-                        if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
-                            == M4xVSS_kCropping
-                            || xVSS_context->pSettings->pClipList[i]->xVSS.
-                            MediaRendering == M4xVSS_kBlackBorders
-                            || xVSS_context->pSettings->pClipList[i]->xVSS.
-                            MediaRendering == M4xVSS_kResizing )
-                        {
-                            pParams->MediaRendering =
-                                xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering;
-                        }
-
-                        pParams->pNext = M4OSA_NULL;
-                        pParams->isCreated = M4OSA_FALSE;
-                        xVSS_context->nbStepTotal++;
-                       /* Set bTranscodingRequired to TRUE to indicate the kenburn video has
-                        * been generated in analysis phase, and does not need to be tanscoded again
-                        * in saving phase */
-                        xVSS_context->pSettings->pClipList[i]->bTranscodingRequired =
-                           M4OSA_TRUE;
-
-    replaceARGB_3GP:
-                        /* Update total duration */
-                        totalDuration += pParams->duration;
-
-                        /* Replacing in VSS structure the JPG file by the 3gp file */
-                        xVSS_context->pSettings->pClipList[i]->FileType =
-                            M4VIDEOEDITING_kFileType_3GPP;
-
-                        if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
-                        {
-                            free(xVSS_context->pSettings->pClipList[i]->pFile);
-                            xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
-                        }
-
-                        /**
-                        * UTF conversion: convert into UTF8, before being used*/
-                        pDecodedPath = pParams->pFileOut;
-
-                        if( xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
-                            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-                            != M4OSA_NULL )
-                        {
-                            err = M4xVSS_internalConvertToUTF8(xVSS_context,
-                                (M4OSA_Void *)pParams->pFileOut,
-                                (M4OSA_Void *)xVSS_context->
-                                UTFConversionContext.pTempOutConversionBuffer,
-                                &length);
-
-                            if( err != M4NO_ERROR )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: \
-                                    0x%x",err);
-                                /* Free Send command */
-                                M4xVSS_freeCommand(xVSS_context);
-                                return err;
-                            }
-                            pDecodedPath =
-                                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-                        }
-                        else
-                        {
-                            length = strlen(pDecodedPath);
-                        }
-                        /**
-                        * End of the UTF conversion, use the converted file path*/
-                        xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_32bitAlignedMalloc((length
-                            + 1), M4VS, (M4OSA_Char *)"xVSS file path of ARGB to 3gp");
-
-                        if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
-                        {
-                            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                            /*FB: to avoid leaks when there is an error in the send command*/
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            /**/
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)xVSS_context->pSettings->pClipList[i]->pFile,
-                            (void *)pDecodedPath, (length + 1));
-                        /*FB: add file path size because of UTF16 conversion*/
-                        xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
-            }
-        }
-        /************************
-        3GP input file type case
-        *************************/
-        else if( xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_3GPP
-            || xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_MP4
-            || xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_M4V )
-        {
-            /*UTF conversion support*/
-            M4OSA_Void *pDecodedPath = M4OSA_NULL;
-
-            /* Need to call MCS in case 3GP video/audio types are not compatible
-            (H263/MPEG4 or AMRNB/AAC) */
-            /* => Need to fill MCS_Params structure with the right parameters ! */
-            /* Need also to parse MCS params struct to check if file has already been transcoded */
-
-            M4VIDEOEDITING_ClipProperties fileProperties;
-            M4xVSS_MCS_params *pParams;
-            M4OSA_Bool audioIsDifferent = M4OSA_FALSE;
-            M4OSA_Bool videoIsDifferent = M4OSA_FALSE;
-            M4OSA_Bool bAudioMono;
-            /* Initialize file properties structure */
-
-            memset((void *) &fileProperties,0,
-                sizeof(M4VIDEOEDITING_ClipProperties));
-
-            //fileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
-
-            /* Prevent from bad initializing of percentage cut time */
-            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent
-                            > 100 || xVSS_context->pSettings->pClipList[i]->xVSS.
-                            uiBeginCutPercent > 100 )
-            {
-                /* These percentage cut time have probably not been initialized */
-                /* Let's not use them by setting them to 0 */
-                xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent = 0;
-                xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent =
-                    0;
-            }
-
-            /**
-            * UTF conversion: convert into the customer format, before being used*/
-            pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
-
-            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                != M4OSA_NULL && xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer
-                != M4OSA_NULL )
-            {
-                err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
-                    *)xVSS_context->pSettings->pClipList[i]->pFile,
-                    (M4OSA_Void *)xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer,
-                    &length);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                        err);
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    return err;
-                }
-                pDecodedPath =
-                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-            }
-            /**
-            * End of the UTF conversion, use the converted file path*/
-            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
-                &fileProperties);
-
-            if( err != M4NO_ERROR )
-            {
-                M4xVSS_freeCommand(xVSS_context);
-                M4OSA_TRACE1_1(
-                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
-                    err);
-                /* TODO: Translate error code of MCS to an xVSS error code */
-                return err;
-            }
-
-            /* Parse MCS params chained list to know if input file has already been converted */
-            if( xVSS_context->pMCSparamsList != M4OSA_NULL )
-            {
-                M4OSA_UInt32 pCmpResult = 0;
-
-                pParams = xVSS_context->pMCSparamsList;
-                /* We parse all MCS Param chained list */
-                while( pParams != M4OSA_NULL )
-                {
-
-                    /**
-                    * UTF conversion: convert into UTF8, before being used*/
-                    pDecodedPath = pParams->pFileIn;
-
-                    if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
-                        != M4OSA_NULL && xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer
-                        != M4OSA_NULL )
-                    {
-                        err = M4xVSS_internalConvertToUTF8(xVSS_context,
-                            (M4OSA_Void *)pParams->pFileIn,
-                            (M4OSA_Void *)xVSS_context->
-                            UTFConversionContext.
-                            pTempOutConversionBuffer, &length);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err:\
-                                 0x%x", err);
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            return err;
-                        }
-                        pDecodedPath = xVSS_context->
-                            UTFConversionContext.pTempOutConversionBuffer;
-                    }
-
-                    /**
-                    * End of the UTF conversion, use the converted file path*/
-                    pCmpResult = strcmp((const char *)pSettings->pClipList[i]->pFile,
-                        (const char *)pDecodedPath);
-
-                    /* If input filenames are the same, and if this is not a BGM, we can reuse
-                    the transcoded file */
-                    if( pCmpResult == 0 && pParams->isBGM == M4OSA_FALSE
-                        && pParams->BeginCutTime
-                        == pSettings->pClipList[i]->uiBeginCutTime
-                        && (pParams->EndCutTime
-                        == pSettings->pClipList[i]->uiEndCutTime
-                        || pParams->EndCutTime
-                        == pSettings->pClipList[i]->uiBeginCutTime
-                        + pSettings->pClipList[i]->xVSS.uiDuration)
-                        && pSettings->pClipList[i]->xVSS.MediaRendering
-                        == pParams->MediaRendering )
-                    {
-                        if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-                        {
-                            if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100
-                                || (pParams->OutputAudioFormat
-                                == M4VIDEOEDITING_kNullAudio
-                                && fileProperties.AudioStreamType
-                                == pSettings->xVSS.outputAudioFormat)
-                                || pParams->OutputAudioFormat
-                                == pSettings->xVSS.outputAudioFormat
-                                || fileProperties.AudioStreamType
-                                == M4VIDEOEDITING_kNoneAudio )
-                            {
-                                /* Replace 3GP filename with transcoded 3GP filename */
-                                goto replace3GP_3GP;
-                            }
-                        }
-                        else if( ( pParams->OutputAudioFormat
-                            == M4VIDEOEDITING_kNullAudio
-                            && fileProperties.AudioStreamType
-                            == pSettings->xVSS.outputAudioFormat)
-                            || pParams->OutputAudioFormat
-                            == pSettings->xVSS.outputAudioFormat
-                            || fileProperties.AudioStreamType
-                            == M4VIDEOEDITING_kNoneAudio )
-                        {
-                            /* Replace 3GP filename with transcoded 3GP filename */
-                            goto replace3GP_3GP;
-                        }
-                    }
-
-                    /* We need to update this variable, in case some 3GP files have been added
-                    between two */
-                    /* calls to M4xVSS_sendCommand */
-                    pMCS_last = pParams;
-                    pParams = pParams->pNext;
-                }
-            }
-
-            /* If we have percentage information let's use it... */
-            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent != 0
-                || xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent
-                != 0 )
-            {
-                /* If percentage information are not correct and if duration field is not filled */
-                if( ( xVSS_context->pSettings->pClipList[i]->xVSS.
-                    uiEndCutPercent
-                    <= xVSS_context->pSettings->pClipList[i]->xVSS.
-                    uiBeginCutPercent)
-                    && xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration
-                    == 0 )
-                {
-                    M4OSA_TRACE1_0(
-                        "M4xVSS_sendCommand: Bad percentage for begin and end cut time !");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return M4ERR_PARAMETER;
-                }
-
-                /* We transform the percentage into absolute time */
-                xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
-                    = (M4OSA_UInt32)(
-                    xVSS_context->pSettings->pClipList[i]->xVSS.
-                    uiBeginCutPercent
-                    * fileProperties.uiClipDuration / 100);
-                xVSS_context->pSettings->pClipList[i]->uiEndCutTime
-                    = (M4OSA_UInt32)(
-                    xVSS_context->pSettings->pClipList[i]->xVSS.
-                    uiEndCutPercent
-                    * fileProperties.uiClipDuration / 100);
-            }
-            /* ...Otherwise, we use absolute time. */
-            else
-            {
-                /* If endCutTime == 0, it means all the file is taken. Let's change to the file
-                duration, to accurate preview. */
-                if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime == 0
-                    || xVSS_context->pSettings->pClipList[i]->uiEndCutTime
-                    > fileProperties.uiClipDuration )
-                {
-                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
-                        fileProperties.uiClipDuration;
-                }
-            }
-
-            /* If duration field is filled, it has priority on other fields on EndCutTime,
-             so let's use it */
-            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
-            {
-                xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
-                    xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
-                    +xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
-
-                if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime
-                    > fileProperties.uiClipDuration )
-                {
-                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
-                        fileProperties.uiClipDuration;
-                }
-            }
-
-            /* If output video format is not set, we take video format of the first 3GP video */
-            if( xVSS_context->pSettings->xVSS.outputVideoFormat
-                == M4VIDEOEDITING_kNoneVideo )
-            {
-                //xVSS_context->pSettings->xVSS.outputVideoFormat = fileProperties.VideoStreamType;
-                //M4OSA_TRACE2_1("Output video format is not set, set it to current clip: %d",
-                // xVSS_context->pSettings->xVSS.outputVideoFormat);
-                M4OSA_TRACE1_0(
-                    "Output video format is not set, an error parameter is returned.");
-                M4xVSS_freeCommand(xVSS_context);
-                return M4ERR_PARAMETER;
-            }
-
-            if( xVSS_context->pSettings->xVSS.outputAudioFormat
-                == M4VIDEOEDITING_kNoneAudio )
-            {
-                //xVSS_context->pSettings->xVSS.outputAudioFormat = fileProperties.AudioStreamType;
-                M4OSA_TRACE2_1(
-                    "Output audio format is not set -> remove audio track of clip: %d",
-                    i);
-            }
-
-            if( fileProperties.uiNbChannels == 1 )
-            {
-                bAudioMono = M4OSA_TRUE;
-            }
-            else
-            {
-                bAudioMono = M4OSA_FALSE;
-            }
-
-            if( fileProperties.AudioStreamType
-                != xVSS_context->pSettings->xVSS.outputAudioFormat
-                || (fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
-                && (fileProperties.uiSamplingFrequency != samplingFreq
-                || bAudioMono
-                != xVSS_context->pSettings->xVSS.bAudioMono)) )
-            {
-                audioIsDifferent = M4OSA_TRUE;
-                /* If we want to replace audio, there is no need to transcode audio */
-                if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-                {
-                    /* temp fix :PT volume not herad in the second clip */
-                    if( /*(pSettings->xVSS.pBGMtrack->uiAddVolume == 100
-                        && xVSS_context->pSettings->xVSS.outputFileSize == 0)
-                        ||*/
-                        fileProperties.AudioStreamType
-                        == M4VIDEOEDITING_kNoneAudio ) /*11/12/2008 CR 3283 VAL for the MMS
-                        use case, we need to transcode except the media without audio*/
-                    {
-                        audioIsDifferent = M4OSA_FALSE;
-                    }
-                }
-                else if( fileProperties.AudioStreamType
-                    == M4VIDEOEDITING_kNoneAudio )
-                {
-                    audioIsDifferent = M4OSA_FALSE;
-                }
-            }
-            /* Here check the clip video profile and level, if it exceeds
-             * the profile and level of export file, then the file needs
-             * to be transcoded(do not do compress domain trim).
-             * Also for MPEG4 fomart, always do transcoding since HW encoder
-             * may use different time scale value than the input clip*/
-           if ((fileProperties.uiVideoProfile >
-                     xVSS_context->pSettings->xVSS.outputVideoProfile) ||
-                (fileProperties.uiVideoLevel >
-                     xVSS_context->pSettings->xVSS.outputVideoLevel) ||
-                (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)) {
-               /* Set bTranscodingRequired to TRUE to indicate the video will be
-                * transcoded in MCS. */
-               xVSS_context->pSettings->pClipList[i]->bTranscodingRequired =
-                   M4OSA_TRUE;
-               videoIsDifferent = M4OSA_TRUE;
-           }
-
-            if( videoIsDifferent == M4OSA_TRUE || audioIsDifferent == M4OSA_TRUE)
-            {
-                M4OSA_Char out_3gp[M4XVSS_MAX_PATH_LEN];
-                M4OSA_Char out_3gp_tmp[M4XVSS_MAX_PATH_LEN];
-
-                /* Construct output temporary 3GP filename */
-                err = M4OSA_chrSPrintf(out_3gp, M4XVSS_MAX_PATH_LEN - 1, (M4OSA_Char *)"%svid%d.3gp",
-                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
-                    return err;
-                }
-
-#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-
-                err = M4OSA_chrSPrintf(out_3gp_tmp, M4XVSS_MAX_PATH_LEN - 1, "%svid%d.tmp",
-                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
-                    return err;
-                }
-
-#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-
-                xVSS_context->tempFileIndex++;
-
-                pParams =
-                    (M4xVSS_MCS_params *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_MCS_params),
-                    M4VS, (M4OSA_Char *)"Element of MCS Params (for 3GP)");
-
-                if( pParams == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0(
-                        "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                pParams->MediaRendering = M4xVSS_kResizing;
-                pParams->videoclipnumber = i; // Indicates video clip index
-
-                if( xVSS_context->pMCSparamsList
-                    == M4OSA_NULL ) /* Means it is the first element of the list */
-                {
-                    /* Initialize the xVSS context with the first element of the list */
-                    xVSS_context->pMCSparamsList = pParams;
-                }
-                else
-                {
-                    /* Update next pointer of the previous last element of the chain */
-                    pMCS_last->pNext = pParams;
-                }
-
-                /* Save this element in case of other file to convert */
-                pMCS_last = pParams;
-
-                /* Fill the last M4xVSS_MCS_params element */
-                pParams->InputFileType = M4VIDEOEDITING_kFileType_3GPP;
-                pParams->OutputFileType = M4VIDEOEDITING_kFileType_3GPP;
-
-                pParams->OutputVideoTimescale = xVSS_context->targetedTimescale;
-
-                /* We do not need to reencode video if its parameters do not differ */
-                /* from output settings parameters */
-                if( videoIsDifferent == M4OSA_TRUE )
-                {
-                    pParams->OutputVideoFormat =
-                        xVSS_context->pSettings->xVSS.outputVideoFormat;
-                    pParams->outputVideoProfile =
-                        xVSS_context->pSettings->xVSS.outputVideoProfile;
-                    pParams->outputVideoLevel =
-                        xVSS_context->pSettings->xVSS.outputVideoLevel;
-                    pParams->OutputVideoFrameRate =
-                        xVSS_context->pSettings->videoFrameRate;
-                    pParams->OutputVideoFrameSize =
-                        xVSS_context->pSettings->xVSS.outputVideoSize;
-
-                    /*FB: VAL CR P4ME00003076
-                    The output video bitrate is now directly given by the user in the edition
-                    settings structure If the bitrate given by the user is irrelevant
-                    (the MCS minimum and maximum video bitrate are used),
-                    the output video bitrate is hardcoded according to the output video size*/
-                    if( xVSS_context->pSettings->xVSS.outputVideoBitrate
-                        >= M4VIDEOEDITING_k16_KBPS
-                        && xVSS_context->pSettings->xVSS.outputVideoBitrate
-                        <= M4VIDEOEDITING_k8_MBPS ) /*+ New Encoder bitrates */
-                    {
-                        pParams->OutputVideoBitrate =
-                            xVSS_context->pSettings->xVSS.outputVideoBitrate;
-                    }
-                    else
-                    {
-                        switch( xVSS_context->pSettings->xVSS.outputVideoSize )
-                        {
-                            case M4VIDEOEDITING_kSQCIF:
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k48_KBPS;
-                                break;
-
-                            case M4VIDEOEDITING_kQQVGA:
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k64_KBPS;
-                                break;
-
-                            case M4VIDEOEDITING_kQCIF:
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k128_KBPS;
-                                break;
-
-                            case M4VIDEOEDITING_kQVGA:
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k384_KBPS;
-                                break;
-
-                            case M4VIDEOEDITING_kCIF:
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k384_KBPS;
-                                break;
-
-                            case M4VIDEOEDITING_kVGA:
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k512_KBPS;
-                                break;
-
-                            default: /* Should not happen !! */
-                                pParams->OutputVideoBitrate =
-                                    M4VIDEOEDITING_k64_KBPS;
-                                break;
-                        }
-                    }
-                }
-                else
-                {
-                    pParams->outputVideoProfile =
-                        xVSS_context->pSettings->xVSS.outputVideoProfile;
-                    pParams->outputVideoLevel =
-                        xVSS_context->pSettings->xVSS.outputVideoLevel;
-                    pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
-                    pParams->OutputVideoFrameRate =
-                        M4VIDEOEDITING_k15_FPS; /* Must be set, otherwise, MCS returns an error */
-                }
-
-                if( audioIsDifferent == M4OSA_TRUE )
-                {
-                    pParams->OutputAudioFormat =
-                        xVSS_context->pSettings->xVSS.outputAudioFormat;
-
-                    switch( xVSS_context->pSettings->xVSS.outputAudioFormat )
-                    {
-                        case M4VIDEOEDITING_kNoneAudio:
-                            break;
-
-                        case M4VIDEOEDITING_kAMR_NB:
-                            pParams->OutputAudioBitrate =
-                                M4VIDEOEDITING_k12_2_KBPS;
-                            pParams->bAudioMono = M4OSA_TRUE;
-                            pParams->OutputAudioSamplingFrequency =
-                                M4VIDEOEDITING_kDefault_ASF;
-                            break;
-
-                        case M4VIDEOEDITING_kAAC:
-                            {
-                                /*FB: VAL CR P4ME00003076
-                                The output audio bitrate in the AAC case is now directly given by
-                                the user in the edition settings structure
-                                If the bitrate given by the user is irrelevant or undefined
-                                (the MCS minimum and maximum audio bitrate are used),
-                                the output audio bitrate is hard coded according to the output
-                                audio sampling frequency*/
-
-                                /*Check if the audio bitrate is correctly defined*/
-
-                                /*Mono
-                                MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
-                                if( xVSS_context->pSettings->xVSS.outputAudioBitrate
-                                    >= M4VIDEOEDITING_k16_KBPS
-                                    && xVSS_context->pSettings->
-                                    xVSS.outputAudioBitrate
-                                    <= M4VIDEOEDITING_k192_KBPS
-                                    && xVSS_context->pSettings->xVSS.bAudioMono
-                                    == M4OSA_TRUE )
-                                {
-                                    pParams->OutputAudioBitrate =
-                                        xVSS_context->pSettings->
-                                        xVSS.outputAudioBitrate;
-                                }
-                                /*Stereo
-                                MCS values for AAC Mono are min: 32kbps and max: 192 kbps*/
-                                else if( xVSS_context->pSettings->
-                                    xVSS.outputAudioBitrate
-                                    >= M4VIDEOEDITING_k32_KBPS
-                                    && xVSS_context->pSettings->
-                                    xVSS.outputAudioBitrate
-                                    <= M4VIDEOEDITING_k192_KBPS
-                                    && xVSS_context->pSettings->xVSS.bAudioMono
-                                    == M4OSA_FALSE )
-                                {
-                                    pParams->OutputAudioBitrate =
-                                        xVSS_context->pSettings->
-                                        xVSS.outputAudioBitrate;
-                                }
-
-                                /*The audio bitrate is hard coded according to the output audio
-                                 sampling frequency*/
-                                else
-                                {
-                                    switch( xVSS_context->pSettings->
-                                        xVSS.outputAudioSamplFreq )
-                                    {
-                                        case M4VIDEOEDITING_k16000_ASF:
-                                            pParams->OutputAudioBitrate =
-                                                M4VIDEOEDITING_k24_KBPS;
-                                            break;
-
-                                        case M4VIDEOEDITING_k22050_ASF:
-                                        case M4VIDEOEDITING_k24000_ASF:
-                                            pParams->OutputAudioBitrate =
-                                                M4VIDEOEDITING_k32_KBPS;
-                                            break;
-
-                                        case M4VIDEOEDITING_k32000_ASF:
-                                            pParams->OutputAudioBitrate =
-                                                M4VIDEOEDITING_k48_KBPS;
-                                            break;
-
-                                        case M4VIDEOEDITING_k44100_ASF:
-                                        case M4VIDEOEDITING_k48000_ASF:
-                                            pParams->OutputAudioBitrate =
-                                                M4VIDEOEDITING_k64_KBPS;
-                                            break;
-
-                                        default:
-                                            pParams->OutputAudioBitrate =
-                                                M4VIDEOEDITING_k64_KBPS;
-                                            break;
-                                    }
-
-                                    if( xVSS_context->pSettings->xVSS.bAudioMono
-                                        == M4OSA_FALSE )
-                                    {
-                                        /* Output bitrate have to be doubled */
-                                        pParams->OutputAudioBitrate +=
-                                            pParams->OutputAudioBitrate;
-                                    }
-                                }
-
-                                pParams->bAudioMono =
-                                    xVSS_context->pSettings->xVSS.bAudioMono;
-
-                                if( xVSS_context->pSettings->
-                                    xVSS.outputAudioSamplFreq
-                                    == M4VIDEOEDITING_k8000_ASF )
-                                {
-                                    /* Prevent from unallowed sampling frequencies */
-                                    pParams->OutputAudioSamplingFrequency =
-                                        M4VIDEOEDITING_kDefault_ASF;
-                                }
-                                else
-                                {
-                                    pParams->OutputAudioSamplingFrequency =
-                                        xVSS_context->pSettings->
-                                        xVSS.outputAudioSamplFreq;
-                                }
-                                break;
-                            }
-
-                        default: /* Should not happen !! */
-                            pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
-                            pParams->OutputAudioBitrate =
-                                M4VIDEOEDITING_k12_2_KBPS;
-                            pParams->bAudioMono = M4OSA_TRUE;
-                            pParams->OutputAudioSamplingFrequency =
-                                M4VIDEOEDITING_kDefault_ASF;
-                            break;
-                        }
-                }
-                else
-                {
-                    pParams->OutputAudioFormat = M4VIDEOEDITING_kNullAudio;
-                }
-
-                /**
-                * UTF conversion: convert into the customer format, before being used*/
-                pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
-                length = strlen(pDecodedPath);
-
-                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                    != M4OSA_NULL && xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer
-                    != M4OSA_NULL )
-                {
-                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                        (M4OSA_Void *)xVSS_context->pSettings->
-                        pClipList[i]->pFile,
-                        (M4OSA_Void *)xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer,
-                        &length);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                            err);
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return err;
-                    }
-                    pDecodedPath = xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer;
-                }
-
-                /**
-                * End of the UTF conversion, use the converted file path*/
-                pParams->pFileIn =
-                    (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-                    (M4OSA_Char *)"MCS 3GP Params: file in");
-
-                if( pParams->pFileIn == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                memcpy((void *)pParams->pFileIn, (void *)pDecodedPath,
-                    (length + 1)); /* Copy input file path */
-
-                /**
-                * UTF conversion: convert into the customer format, before being used*/
-                pDecodedPath = out_3gp;
-                length = strlen(pDecodedPath);
-
-                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                    != M4OSA_NULL && xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer
-                    != M4OSA_NULL )
-                {
-                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                        (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer,
-                        &length);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                            err);
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return err;
-                    }
-                    pDecodedPath = xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer;
-                }
-
-                /**
-                * End of the UTF conversion, use the converted file path*/
-                pParams->pFileOut =
-                    (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-                    (M4OSA_Char *)"MCS 3GP Params: file out");
-
-                if( pParams->pFileOut == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                memcpy((void *)pParams->pFileOut, (void *)pDecodedPath,
-                    (length + 1)); /* Copy output file path */
-
-#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-                /**
-                * UTF conversion: convert into the customer format, before being used*/
-
-                pDecodedPath = out_3gp_tmp;
-                length = strlen(pDecodedPath);
-
-                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                    != M4OSA_NULL && xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer
-                    != M4OSA_NULL )
-                {
-                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                        (M4OSA_Void *)out_3gp_tmp,
-                        (M4OSA_Void *)xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer,
-                        &length);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                            err);
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return err;
-                    }
-                    pDecodedPath = xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer;
-                }
-
-                /**
-                * End of the UTF conversion, use the converted file path*/
-                pParams->pFileTemp =
-                    (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-                    (M4OSA_Char *)"MCS 3GP Params: file temp");
-
-                if( pParams->pFileTemp == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                memcpy((void *)pParams->pFileTemp, (void *)pDecodedPath,
-                    (length + 1)); /* Copy temporary file path */
-
-#else
-
-                pParams->pFileTemp = M4OSA_NULL;
-
-#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-
-                /*FB 2008/10/20 keep media aspect ratio, add media rendering parameter*/
-
-                if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
-                    == M4xVSS_kCropping
-                    || xVSS_context->pSettings->pClipList[i]->xVSS.
-                    MediaRendering == M4xVSS_kBlackBorders
-                    || xVSS_context->pSettings->pClipList[i]->xVSS.
-                    MediaRendering == M4xVSS_kResizing )
-                {
-                    pParams->MediaRendering =
-                        xVSS_context->pSettings->pClipList[i]->xVSS.
-                        MediaRendering;
-                }
-
-                /*FB: transcoding per parts*/
-                pParams->BeginCutTime =
-                    xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
-                pParams->EndCutTime =
-                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
-
-                pParams->pNext = M4OSA_NULL;
-                pParams->isBGM = M4OSA_FALSE;
-                pParams->isCreated = M4OSA_FALSE;
-                xVSS_context->nbStepTotal++;
-                bIsTranscoding = M4OSA_TRUE;
-
-replace3GP_3GP:
-                /* Update total duration */
-                totalDuration +=
-                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime
-                    - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
-
-                /* Replacing in VSS structure the original 3GP file by the transcoded 3GP file */
-                xVSS_context->pSettings->pClipList[i]->FileType =
-                    M4VIDEOEDITING_kFileType_3GPP;
-
-                if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pSettings->pClipList[i]->pFile);
-                    xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
-                }
-
-                /**
-                * UTF conversion: convert into the customer format, before being used*/
-                pDecodedPath = pParams->pFileOut;
-
-                if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
-                    != M4OSA_NULL && xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer
-                    != M4OSA_NULL )
-                {
-                    err = M4xVSS_internalConvertToUTF8(xVSS_context,
-                        (M4OSA_Void *)pParams->pFileOut,
-                        (M4OSA_Void *)xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer,
-                        &length);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: 0x%x",
-                            err);
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return err;
-                    }
-                    pDecodedPath = xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer;
-                }
-                else
-                {
-                    length = strlen(pDecodedPath);
-                }
-                /**
-                * End of the UTF conversion, use the converted file path*/
-                xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_32bitAlignedMalloc(
-                    (length + 1),
-                    M4VS, (M4OSA_Char *)"xVSS file path of 3gp to 3gp");
-
-                if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                memcpy((void *)xVSS_context->pSettings->pClipList[i]->pFile,
-                    (void *)pDecodedPath, (length + 1));
-                /*FB: add file path size because of UTF 16 conversion*/
-                xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
-
-                /* We define master clip as first 3GP input clip */
-                /*if(xVSS_context->pSettings->uiMasterClip == 0 && fileProperties.
-                AudioStreamType != M4VIDEOEDITING_kNoneAudio)
-                {
-                xVSS_context->pSettings->uiMasterClip = i;
-                }*/
-            }
-            else
-            {
-                /* Update total duration */
-                totalDuration +=
-                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime
-                    - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
-            }
-            /* We define master clip as first 3GP input clip */
-            if( masterClip == -1
-                && fileProperties.AudioStreamType != M4VIDEOEDITING_kNoneAudio )
-            {
-                masterClip = i;
-                xVSS_context->pSettings->uiMasterClip = i;
-            }
-
-        }
-        /**************************
-        Other input file type case
-        ***************************/
-        else
-        {
-            M4OSA_TRACE1_0("Bad file type as input clip");
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return M4ERR_PARAMETER;
-        }
-    }
-
-    /*********************************************************
-    * Parse all effects to make some adjustment for framing, *
-    * text and to transform relative time into absolute time *
-    **********************************************************/
-    for ( j = 0; j < xVSS_context->pSettings->nbEffects; j++ )
-    {
-        /* Copy effect to "local" structure */
-        memcpy((void *) &(xVSS_context->pSettings->Effects[j]),
-            (void *) &(pSettings->Effects[j]),
-            sizeof(M4VSS3GPP_EffectSettings));
-
-        /* Prevent from bad initializing of effect percentage time */
-        if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent > 100
-            || xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent > 100 )
-        {
-            /* These percentage time have probably not been initialized */
-            /* Let's not use them by setting them to 0 */
-            xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent = 0;
-            xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent = 0;
-        }
-
-        /* If we have percentage information let's use it... Otherwise, we use absolute time. */
-        if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent != 0 )
-        {
-            xVSS_context->pSettings->
-                Effects[j].uiStartTime = (M4OSA_UInt32)(totalDuration
-                * xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent
-                / 100);
-            /* The percentage of effect duration is based on the duration of the clip -
-            start time */
-            xVSS_context->pSettings->
-                Effects[j].uiDuration = (M4OSA_UInt32)(totalDuration
-                * xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent
-                / 100);
-        }
-
-        /* If there is a framing effect, we need to allocate framing effect structure */
-        if( xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Framing )
-        {
-#ifdef DECODE_GIF_ON_SAVING
-
-            M4xVSS_FramingContext *framingCtx;
-            /*UTF conversion support*/
-            M4OSA_Void *pDecodedPath = M4OSA_NULL;
-
-#else
-
-            M4xVSS_FramingStruct *framingCtx;
-
-#endif /*DECODE_GIF_ON_SAVING*/
-
-            M4OSA_Char *pExt2 = M4OSA_NULL;
-            M4VIFI_ImagePlane *pPlane =
-                xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
-            M4OSA_Int32 result1, result2;
-
-            /* Copy framing file path */
-            if( pSettings->Effects[j].xVSS.pFramingFilePath != M4OSA_NULL )
-            {
-                xVSS_context->pSettings->
-                    Effects[j].xVSS.pFramingFilePath = M4OSA_32bitAlignedMalloc(
-                    strlen(pSettings->Effects[j].xVSS.pFramingFilePath)
-                    + 1, M4VS, (M4OSA_Char *)"Local Framing file path");
-
-                if( xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath
-                    == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                memcpy((void *)xVSS_context->pSettings->
-                    Effects[j].xVSS.pFramingFilePath,
-                    (void *)pSettings->
-                    Effects[j].xVSS.pFramingFilePath, strlen(
-                    pSettings->Effects[j].xVSS.pFramingFilePath) + 1);
-
-                pExt2 =
-                    xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
-            }
-
-#ifdef DECODE_GIF_ON_SAVING
-
-            framingCtx = (M4xVSS_FramingContext
-                *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingContext),
-                M4VS, (M4OSA_Char *)"Context of the framing effect");
-
-            if( framingCtx == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /*FB: to avoid leaks when there is an error in the send command*/
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                /**/
-                return M4ERR_ALLOC;
-            }
-            framingCtx->aFramingCtx = M4OSA_NULL;
-            framingCtx->aFramingCtx_last = M4OSA_NULL;
-            framingCtx->pSPSContext = M4OSA_NULL;
-            framingCtx->outputVideoSize =
-                xVSS_context->pSettings->xVSS.outputVideoSize;
-            framingCtx->topleft_x =
-                xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-            framingCtx->topleft_y =
-                xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-            framingCtx->bEffectResize =
-                xVSS_context->pSettings->Effects[j].xVSS.bResize;
-            framingCtx->pEffectFilePath =
-                xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
-            framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
-            framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
-            framingCtx->effectDuration =
-                xVSS_context->pSettings->Effects[j].uiDuration;
-            framingCtx->b_IsFileGif = M4OSA_FALSE;
-            framingCtx->alphaBlendingStruct = M4OSA_NULL;
-            framingCtx->b_animated = M4OSA_FALSE;
-
-            /* Output ratio for the effect is stored in uiFiftiesOutFrameRate parameters of the
-            extended xVSS effects structure */
-            if( xVSS_context->pSettings->Effects[j].xVSS.uiFiftiesOutFrameRate
-                != 0 )
-            {
-                framingCtx->frameDurationRatio =
-                    (M4OSA_Float)(( xVSS_context->pSettings->
-                    Effects[j].xVSS.uiFiftiesOutFrameRate) / 1000.0);
-            }
-            else
-            {
-                framingCtx->frameDurationRatio = 1.0;
-            }
-
-            /*Alpha blending*/
-            /*Check if the alpha blending parameters are corrects*/
-            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 100 )
-            {
-                pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
-            }
-
-            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 100 )
-            {
-                pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
-            }
-
-            if( pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
-            {
-                pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
-            }
-
-            if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
-            {
-                pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
-            }
-
-            if( pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
-            {
-                pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
-            }
-
-            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
-                || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 0 )
-            {
-                /*Allocate the alpha blending structure*/
-                framingCtx->alphaBlendingStruct =
-                    (M4xVSS_internalEffectsAlphaBlending *)M4OSA_32bitAlignedMalloc(
-                    sizeof(M4xVSS_internalEffectsAlphaBlending),
-                    M4VS, (M4OSA_Char *)"alpha blending structure");
-
-                if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return M4ERR_ALLOC;
-                }
-                /*Fill the alpha blending structure*/
-                framingCtx->alphaBlendingStruct->m_fadeInTime =
-                    pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
-                framingCtx->alphaBlendingStruct->m_fadeOutTime =
-                    pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
-                framingCtx->alphaBlendingStruct->m_end =
-                    pSettings->Effects[j].xVSS.uialphaBlendingEnd;
-                framingCtx->alphaBlendingStruct->m_middle =
-                    pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
-                framingCtx->alphaBlendingStruct->m_start =
-                    pSettings->Effects[j].xVSS.uialphaBlendingStart;
-
-                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
-                    + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
-                        > 100 )
-                {
-                    framingCtx->alphaBlendingStruct->m_fadeOutTime =
-                        100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
-                }
-            }
-
-            /**
-            * UTF conversion: convert into the customer format, before being used*/
-            pDecodedPath =
-                xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
-            length = strlen(pDecodedPath);
-
-            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                != M4OSA_NULL && xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer
-                != M4OSA_NULL )
-            {
-                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                    (M4OSA_Void *)xVSS_context->pSettings->
-                    Effects[j].xVSS.pFramingFilePath,
-                    (M4OSA_Void *)xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer,
-                    &length);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                        err);
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    return err;
-                }
-                pDecodedPath =
-                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-            }
-
-            /**
-            * End of the UTF conversion, use the converted file path*/
-            framingCtx->pEffectFilePath = M4OSA_32bitAlignedMalloc(length + 1, M4VS,
-                (M4OSA_Char *)"Local Framing file path");
-
-            if( framingCtx->pEffectFilePath == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /*FB: to avoid leaks when there is an error in the send command*/
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                /**/
-                return M4ERR_ALLOC;
-            }
-            memcpy((void *)framingCtx->pEffectFilePath,
-                (void *)pDecodedPath, length + 1);
-
-            /* Save framing structure associated with corresponding effect */
-            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                framingCtx;
-
-#else
-
-            framingCtx = (M4xVSS_FramingStruct
-                *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
-                M4VS, (M4OSA_Char *)"Context of the framing effect");
-
-            if( framingCtx == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /*FB: to avoid leaks when there is an error in the send command*/
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                /**/
-                return M4ERR_ALLOC;
-            }
-
-            framingCtx->topleft_x =
-                xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-            framingCtx->topleft_y =
-                xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-
-            /* BugFix 1.2.0: Leak when decoding error */
-            framingCtx->FramingRgb = M4OSA_NULL;
-            framingCtx->FramingYuv = M4OSA_NULL;
-            framingCtx->pNext = framingCtx;
-            /* Save framing structure associated with corresponding effect */
-            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                framingCtx;
-
-#endif /*DECODE_GIF_ON_SAVING*/
-
-            if( pExt2 != M4OSA_NULL )
-            {
-                /* Decode the image associated to the effect, and fill framing structure */
-                pExt2 += (strlen((const char *)pExt2) - 4);
-
-                result1 = strcmp((const char *)pExt2,(const char *)".rgb");
-                result2 = strcmp((const char *)pExt2,(const char *)".RGB");
-
-                if( 0 == result1 || 0 == result2 )
-                {
-#ifdef DECODE_GIF_ON_SAVING
-
-                    framingCtx->aFramingCtx =
-                        (M4xVSS_FramingStruct
-                        *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
-                        M4VS,
-                        (M4OSA_Char
-                        *)
-                        "M4xVSS_internalDecodeGIF: Context of the framing effect");
-
-                    if( framingCtx->aFramingCtx == M4OSA_NULL )
-                    {
-                        M4OSA_TRACE1_0(
-                            "Allocation error in M4xVSS_SendCommand");
-                        /* TODO: Translate error code of SPS to an xVSS error code */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return M4ERR_ALLOC;
-                    }
-                    framingCtx->aFramingCtx->pCurrent =
-                        M4OSA_NULL; /* Only used by the first element of the chain */
-                    framingCtx->aFramingCtx->previousClipTime = -1;
-                    framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
-                    framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
-                    framingCtx->aFramingCtx->topleft_x =
-                        xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-                    framingCtx->aFramingCtx->topleft_y =
-                        xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-                    /*To support ARGB8888 : get the width and height */
-
-                    framingCtx->aFramingCtx->width =
-                        xVSS_context->pSettings->Effects[j].xVSS.width;
-                    framingCtx->aFramingCtx->height =
-                        xVSS_context->pSettings->Effects[j].xVSS.height;
-                    M4OSA_TRACE1_1("FRAMMING BEFORE M4xVSS_SendCommand  %d",
-                        framingCtx->aFramingCtx->width);
-                    M4OSA_TRACE1_1("FRAMMING BEFORE M4xVSS_SendCommand  %d",
-                        framingCtx->aFramingCtx->height);
-
-#endif
-
-                    err = M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(
-                        xVSS_context,
-                        &(xVSS_context->pSettings->Effects[j]),
-                        framingCtx->aFramingCtx,xVSS_context->pSettings->xVSS.outputVideoSize);
-                    M4OSA_TRACE3_1("FRAMING WIDTH BEFORE M4xVSS_SendCommand  %d",
-                        framingCtx->aFramingCtx->width);
-                    M4OSA_TRACE3_1("FRAMING HEIGHT BEFORE M4xVSS_SendCommand  %d",
-                        framingCtx->aFramingCtx->height);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_SendCommand: M4xVSS_internalDecodePNG returned 0x%x",
-                            err);
-                        /* TODO: Translate error code of SPS to an xVSS error code */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return err;
-                    }
-                }
-                else
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_SendCommand: Not supported still picture format 0x%x",
-                        err);
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_PARAMETER;
-                }
-            }
-            else if( pPlane != M4OSA_NULL )
-            {
-#ifdef DECODE_GIF_ON_SAVING
-
-                framingCtx->aFramingCtx = (M4xVSS_FramingStruct
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
-                    M4VS, (M4OSA_Char *)"Context of the framing effect");
-
-                if( framingCtx->aFramingCtx == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-                framingCtx->aFramingCtx->topleft_x =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-                framingCtx->aFramingCtx->topleft_y =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-
-                /* BugFix 1.2.0: Leak when decoding error */
-                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
-                framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
-                framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
-                framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
-                framingCtx->aFramingCtx->duration = 0;
-                framingCtx->aFramingCtx->previousClipTime = -1;
-                framingCtx->aFramingCtx->FramingRgb =
-                    xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
-                /* Force input RGB buffer to even size to avoid errors in YUV conversion */
-                framingCtx->aFramingCtx->FramingRgb->u_width =
-                    framingCtx->aFramingCtx->FramingRgb->u_width & ~1;
-                framingCtx->aFramingCtx->FramingRgb->u_height =
-                    framingCtx->aFramingCtx->FramingRgb->u_height & ~1;
-                /* Input RGB plane is provided, let's convert it to YUV420, and update framing
-                structure  */
-                err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
-
-#else
-
-                framingCtx->FramingRgb =
-                    xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
-                /* Force input RGB buffer to even size to avoid errors in YUV conversion */
-                framingCtx->FramingRgb.u_width =
-                    framingCtx->FramingRgb.u_width & ~1;
-                framingCtx->FramingRgb.u_height =
-                    framingCtx->FramingRgb.u_height & ~1;
-                /* Input RGB plane is provided, let's convert it to YUV420, and update framing
-                 structure  */
-                err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
-
-#endif
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0w%x",
-                        err);
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return err;
-                }
-            }
-            else
-            {
-                M4OSA_TRACE1_0(
-                    "M4xVSS_sendCommand: No input image/plane provided for framing effect.");
-                /*FB: to avoid leaks when there is an error in the send command*/
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                /**/
-                return M4ERR_PARAMETER;
-            }
-        }
-        /* CR: Add text handling with external text interface */
-        /* If effect type is text, we call external text function to get RGB 565 buffer */
-        if( xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Text )
-        {
-            /* Call the font engine function pointer to get RGB565 buffer */
-            /* We transform text effect into framing effect from buffer */
-            if( xVSS_context->pSettings->xVSS.pTextRenderingFct != M4OSA_NULL )
-            {
-                /*FB: add UTF convertion for text buffer*/
-                M4OSA_Void *pDecodedPath = M4OSA_NULL;
-#ifdef DECODE_GIF_ON_SAVING
-
-                M4xVSS_FramingContext *framingCtx;
-
-#else
-
-                M4xVSS_FramingStruct *framingCtx;
-
-#endif /*DECODE_GIF_ON_SAVING*/
-
-#ifdef DECODE_GIF_ON_SAVING
-
-                framingCtx = (M4xVSS_FramingContext
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingContext),
-                    M4VS, (M4OSA_Char *)"Context of the framing effect");
-
-                if( framingCtx == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-                framingCtx->aFramingCtx = M4OSA_NULL;
-                framingCtx->aFramingCtx_last = M4OSA_NULL;
-                framingCtx->pSPSContext = M4OSA_NULL;
-                framingCtx->outputVideoSize =
-                    xVSS_context->pSettings->xVSS.outputVideoSize;
-                framingCtx->topleft_x =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-                framingCtx->topleft_y =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-                framingCtx->bEffectResize =
-                    xVSS_context->pSettings->Effects[j].xVSS.bResize;
-                framingCtx->pEffectFilePath =
-                    xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
-                framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
-                framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
-                framingCtx->effectDuration =
-                    xVSS_context->pSettings->Effects[j].uiDuration;
-                framingCtx->b_IsFileGif = M4OSA_FALSE;
-                framingCtx->b_animated = M4OSA_FALSE;
-                framingCtx->alphaBlendingStruct = M4OSA_NULL;
-
-                /* Save framing structure associated with corresponding effect */
-                xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                    framingCtx;
-
-                framingCtx->aFramingCtx = (M4xVSS_FramingStruct
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
-                    M4VS, (M4OSA_Char *)"Context of the framing effect");
-
-                if( framingCtx->aFramingCtx == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-                framingCtx->aFramingCtx->topleft_x =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-                framingCtx->aFramingCtx->topleft_y =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-
-                /* BugFix 1.2.0: Leak when decoding error */
-                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
-                framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
-                framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
-                framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
-                framingCtx->aFramingCtx->duration = 0;
-                framingCtx->aFramingCtx->previousClipTime = -1;
-
-                /*Alpha blending*/
-                /*Check if the alpha blending parameters are corrects*/
-                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 100 )
-                {
-                    pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
-                }
-
-                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 100 )
-                {
-                    pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
-                }
-
-                if( pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
-                {
-                    pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
-                }
-
-                if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
-                {
-                    pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
-                }
-
-                if( pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
-                {
-                    pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
-                }
-
-                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
-                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
-                    > 0 )
-                {
-                    /*Allocate the alpha blending structure*/
-                    framingCtx->alphaBlendingStruct =
-                        (M4xVSS_internalEffectsAlphaBlending *)M4OSA_32bitAlignedMalloc(
-                        sizeof(M4xVSS_internalEffectsAlphaBlending),
-                        M4VS, (M4OSA_Char *)"alpha blending structure");
-
-                    if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
-                    {
-                        M4OSA_TRACE1_0(
-                            "Allocation error in M4xVSS_SendCommand");
-                        M4xVSS_freeCommand(xVSS_context);
-                        return M4ERR_ALLOC;
-                    }
-                    /*Fill the alpha blending structure*/
-                    framingCtx->alphaBlendingStruct->m_fadeInTime =
-                        pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
-                    framingCtx->alphaBlendingStruct->m_fadeOutTime =
-                        pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
-                    framingCtx->alphaBlendingStruct->m_end =
-                        pSettings->Effects[j].xVSS.uialphaBlendingEnd;
-                    framingCtx->alphaBlendingStruct->m_middle =
-                        pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
-                    framingCtx->alphaBlendingStruct->m_start =
-                        pSettings->Effects[j].xVSS.uialphaBlendingStart;
-
-                    if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
-                        + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
-                            > 100 )
-                    {
-                        framingCtx->alphaBlendingStruct->m_fadeOutTime =
-                            100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
-                    }
-                }
-#else
-
-                framingCtx = (M4xVSS_FramingStruct
-                    *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
-                    M4VS, (M4OSA_Char
-                    *)"Context of the framing effect (for text)");
-
-                if( framingCtx == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-                framingCtx->topleft_x =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
-                framingCtx->topleft_y =
-                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
-                framingCtx->FramingRgb = M4OSA_NULL;
-
-                /* BugFix 1.2.0: Leak when decoding error */
-                framingCtx->FramingYuv = M4OSA_NULL;
-                framingCtx->pNext = framingCtx;
-
-#endif
-                /* Save framing structure associated with corresponding effect */
-
-                xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                    framingCtx;
-
-                /* FB: changes for Video Artist: memcopy pTextBuffer so that it can be changed
-                after a complete analysis*/
-                if( pSettings->Effects[j].xVSS.pTextBuffer == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("M4xVSS_SendCommand: pTextBuffer is null");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return M4ERR_PARAMETER;
-                }
-
-                /*Convert text buffer into customer format before being used*/
-                /**
-                * UTF conversion: convert into the customer format, before being used*/
-                pDecodedPath = pSettings->Effects[j].xVSS.pTextBuffer;
-                xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
-                    pSettings->Effects[j].xVSS.textBufferSize;
-
-                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                    != M4OSA_NULL && xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer
-                    != M4OSA_NULL )
-                {
-                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                        (M4OSA_Void *)pSettings->
-                        Effects[j].xVSS.pTextBuffer,
-                        (M4OSA_Void *)xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer,
-                        &length);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                            err);
-                        /* Free Send command */
-                        M4xVSS_freeCommand(xVSS_context);
-                        return err;
-                    }
-                    pDecodedPath = xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer;
-                    xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
-                        length;
-                }
-                /**
-                * End of the UTF conversion, use the converted file path*/
-
-                xVSS_context->pSettings->
-                    Effects[j].xVSS.pTextBuffer = M4OSA_32bitAlignedMalloc(
-                    xVSS_context->pSettings->Effects[j].xVSS.textBufferSize + 1,
-                    M4VS, (M4OSA_Char *)"Local text buffer effect");
-
-                //xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer =
-                // M4OSA_32bitAlignedMalloc(strlen(pSettings->Effects[j].xVSS.pTextBuffer)+1,
-                // M4VS, "Local text buffer effect");
-                if( xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer
-                    == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-                if( pSettings->Effects[j].xVSS.pTextBuffer != M4OSA_NULL )
-                {
-                    //memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->Effects[j]
-                    //.xVSS.pTextBuffer, (M4OSA_MemAddr8)pSettings->Effects[j].xVSS.pTextBuffer,
-                    // strlen(pSettings->Effects[j].xVSS.pTextBuffer)+1);
-                    memcpy((void *)xVSS_context->pSettings->
-                        Effects[j].xVSS.pTextBuffer,
-                        (void *)pDecodedPath, xVSS_context->pSettings->
-                        Effects[j].xVSS.textBufferSize + 1);
-                }
-
-                /*Allocate the text RGB buffer*/
-                framingCtx->aFramingCtx->FramingRgb =
-                    (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(sizeof(M4VIFI_ImagePlane),
-                    M4VS,
-                    (M4OSA_Char *)"RGB structure for the text effect");
-
-                if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-                if( xVSS_context->pSettings->Effects[j].xVSS.uiTextBufferWidth
-                    == 0 || xVSS_context->pSettings->
-                    Effects[j].xVSS.uiTextBufferHeight == 0 )
-                {
-                    M4OSA_TRACE1_0(
-                        "M4xVSS_SendCommand: text plane width and height are not defined");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_PARAMETER;
-                }
-                /* Allocate input RGB text buffer and force it to even size to avoid errors in
-                 YUV conversion */
-                framingCtx->aFramingCtx->FramingRgb->u_width =
-                    xVSS_context->pSettings->
-                    Effects[j].xVSS.uiTextBufferWidth & ~1;
-                framingCtx->aFramingCtx->FramingRgb->u_height =
-                    xVSS_context->pSettings->
-                    Effects[j].xVSS.uiTextBufferHeight & ~1;
-                framingCtx->aFramingCtx->FramingRgb->u_stride =
-                    2 * framingCtx->aFramingCtx->FramingRgb->u_width;
-                framingCtx->aFramingCtx->FramingRgb->u_topleft = 0;
-                framingCtx->aFramingCtx->FramingRgb->pac_data =
-                    (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(
-                    framingCtx->aFramingCtx->FramingRgb->u_height
-                    * framingCtx->aFramingCtx->FramingRgb->u_stride,
-                    M4VS, (M4OSA_Char *)"Text RGB plane->pac_data");
-
-                if( framingCtx->aFramingCtx->FramingRgb->pac_data
-                    == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                    /*FB: to avoid leaks when there is an error in the send command*/
-                    /* Free Send command */
-                    M4xVSS_freeCommand(xVSS_context);
-                    /**/
-                    return M4ERR_ALLOC;
-                }
-
-#ifdef DECODE_GIF_ON_SAVING
-                /**/
-                /* Call text rendering function */
-
-                err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
-                    xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
-                    xVSS_context->pSettings->
-                    Effects[j].xVSS.pTextBuffer,
-                    xVSS_context->pSettings->
-                    Effects[j].xVSS.textBufferSize,
-                    &(framingCtx->aFramingCtx->FramingRgb));
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_0("Text rendering external function failed\n");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return err;
-                }
-
-                /* Check that RGB buffer is set */
-                if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0(
-                        "Text rendering function did not set RGB buffer correctly !");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return M4ERR_PARAMETER;
-                }
-
-                /* Convert RGB plane to YUV420 and update framing structure */
-                err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0w%x",
-                        err);
-                    M4xVSS_freeCommand(xVSS_context);
-                    return err;
-                }
-
-#else
-                /**/
-                /* Call text rendering function */
-
-                err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
-                    xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
-                    xVSS_context->pSettings->
-                    Effects[j].xVSS.pTextBuffer,
-                    xVSS_context->pSettings->
-                    Effects[j].xVSS.textBufferSize,
-                    &(framingCtx->FramingRgb));
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_0("Text rendering external function failed\n");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return err;
-                }
-
-                /* Check that RGB buffer is set */
-                if( framingCtx->FramingRgb == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0(
-                        "Text rendering function did not set RGB buffer correctly !");
-                    M4xVSS_freeCommand(xVSS_context);
-                    return M4ERR_PARAMETER;
-                }
-
-                /* Convert RGB plane to YUV420 and update framing structure */
-                err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0w%x",
-                        err);
-                    M4xVSS_freeCommand(xVSS_context);
-                    return err;
-                }
-
-#endif /*DECODE_GIF_ON_SAVING*/
-
-                /* Change internally effect type from "text" to framing */
-
-                xVSS_context->pSettings->Effects[j].VideoEffectType =
-                    M4xVSS_kVideoEffectType_Framing;
-                xVSS_context->pSettings->Effects[j].xVSS.bResize = M4OSA_FALSE;
-            }
-            else
-            {
-                M4OSA_TRACE1_0(
-                    "M4xVSS_sendCommand: No text rendering function set !!");
-                M4xVSS_freeCommand(xVSS_context);
-                return M4ERR_PARAMETER;
-            }
-        }
-
-        /* Allocate the structure to store the data needed by the Fifties effect */
-        else if( xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Fifties )
-        {
-            M4xVSS_FiftiesStruct *fiftiesCtx;
-
-            /* Check the expected frame rate for the fifties effect (must be above 0) */
-            if( 0 == xVSS_context->pSettings->
-                Effects[j].xVSS.uiFiftiesOutFrameRate )
-            {
-                M4OSA_TRACE1_0(
-                    "The frame rate for the fifties effect must be greater than 0 !");
-                M4xVSS_freeCommand(xVSS_context);
-                return M4ERR_PARAMETER;
-            }
-
-            fiftiesCtx = (M4xVSS_FiftiesStruct
-                *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FiftiesStruct),
-                M4VS, (M4OSA_Char *)"Context of the fifties effect");
-
-            if( fiftiesCtx == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                return M4ERR_ALLOC;
-            }
-
-            fiftiesCtx->previousClipTime = -1;
-            fiftiesCtx->fiftiesEffectDuration = 1000 / xVSS_context->pSettings->
-                Effects[j].xVSS.uiFiftiesOutFrameRate;
-            fiftiesCtx->shiftRandomValue = 0;
-            fiftiesCtx->stripeRandomValue = 0;
-
-            /* Save the structure associated with corresponding effect */
-            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                fiftiesCtx;
-        }
-
-        /* Allocate the structure to store the data needed by the Color effect */
-        else if( xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_ColorRGB16
-            || xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_BlackAndWhite
-            || xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Pink
-            || xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Green
-            || xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Sepia
-            || xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Negative
-            || xVSS_context->pSettings->Effects[j].VideoEffectType
-            == M4xVSS_kVideoEffectType_Gradient )
-        {
-            M4xVSS_ColorStruct *ColorCtx;
-
-            ColorCtx =
-                (M4xVSS_ColorStruct *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_ColorStruct),
-                M4VS, (M4OSA_Char *)"Context of the color effect");
-
-            if( ColorCtx == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                return M4ERR_ALLOC;
-            }
-
-            ColorCtx->colorEffectType =
-                xVSS_context->pSettings->Effects[j].VideoEffectType;
-
-            if( xVSS_context->pSettings->Effects[j].VideoEffectType
-                == M4xVSS_kVideoEffectType_ColorRGB16
-                || xVSS_context->pSettings->Effects[j].VideoEffectType
-                == M4xVSS_kVideoEffectType_Gradient )
-            {
-                ColorCtx->rgb16ColorData =
-                    xVSS_context->pSettings->Effects[j].xVSS.uiRgb16InputColor;
-            }
-            else
-            {
-                ColorCtx->rgb16ColorData = 0;
-            }
-
-            /* Save the structure associated with corresponding effect */
-            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                ColorCtx;
-        }
-    }
-
-    /**********************************
-    Background music registering
-    **********************************/
-    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL && isNewBGM == M4OSA_TRUE )
-    {
-#ifdef PREVIEW_ENABLED
-
-        M4xVSS_MCS_params *pParams;
-        M4OSA_Char *out_pcm;
-        /*UTF conversion support*/
-        M4OSA_Void *pDecodedPath = M4OSA_NULL;
-
-#endif
-
-        /* We save output file pointer, because we will need to use it when saving audio mixed
-         file (last save step) */
-
-        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
-        xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
-
-        /* If a previous BGM has already been registered, delete it */
-        /* Here can be implemented test to know if the same BGM is registered */
-        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-        {
-            if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pSettings->xVSS.pBGMtrack->
-                    pFile);
-                xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
-            }
-            free(xVSS_context->pSettings->xVSS.pBGMtrack);
-            xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
-        }
-
-        /* Allocate BGM */
-        xVSS_context->pSettings->xVSS.pBGMtrack =
-            (M4xVSS_BGMSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_BGMSettings), M4VS,
-            (M4OSA_Char *)"xVSS_context->pSettings->xVSS.pBGMtrack");
-
-        if( xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            return M4ERR_ALLOC;
-        }
-
-        /* Copy input structure to our structure */
-        memcpy((void *)xVSS_context->pSettings->xVSS.pBGMtrack,
-            (void *)pSettings->xVSS.pBGMtrack,
-            sizeof(M4xVSS_BGMSettings));
-        /* Allocate file name, and copy file name buffer to our structure */
-        xVSS_context->pSettings->xVSS.pBGMtrack->pFile =
-            M4OSA_32bitAlignedMalloc((strlen(pSettings->xVSS.pBGMtrack->pFile)
-            + 1), M4VS, (M4OSA_Char *)"xVSS BGM file path");
-
-        if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
-            (void *)pSettings->xVSS.pBGMtrack->pFile,
-            strlen(pSettings->xVSS.pBGMtrack->pFile) + 1);
-
-#ifdef PREVIEW_ENABLED
-        /* Decode BGM track to pcm output file */
-
-        pParams =
-            (M4xVSS_MCS_params *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_MCS_params), M4VS,
-            (M4OSA_Char *)"Element of MCS Params (for BGM)");
-
-        if( pParams == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0(
-                "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
-            return M4ERR_ALLOC;
-        }
-
-        /* Initialize the pointers in case of problem (PR 2273) */
-        pParams->pFileIn = M4OSA_NULL;
-        pParams->pFileOut = M4OSA_NULL;
-        pParams->pFileTemp = M4OSA_NULL;
-        pParams->pNext = M4OSA_NULL;
-        pParams->BeginCutTime = 0;
-        pParams->EndCutTime = 0;
-
-        if( xVSS_context->pMCSparamsList
-            == M4OSA_NULL ) /* Means it is the first element of the list */
-        {
-            /* Initialize the xVSS context with the first element of the list */
-            xVSS_context->pMCSparamsList = pParams;
-
-        }
-        else
-        {
-            M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
-            M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
-
-            /* Parse MCS params chained list to find and delete BGM element */
-            while( pParams_temp != M4OSA_NULL )
-            {
-                if( pParams_temp->isBGM == M4OSA_TRUE )
-                {
-                    /* Remove this element */
-                    if( pParams_temp->pFileIn != M4OSA_NULL )
-                    {
-                        free(pParams_temp->pFileIn);
-                        pParams_temp->pFileIn = M4OSA_NULL;
-                    }
-
-                    if( pParams_temp->pFileOut != M4OSA_NULL )
-                    {
-                        /* Remove PCM temporary file */
-                        remove((const char *)pParams_temp->pFileOut);
-                        free(pParams_temp->pFileOut);
-                        pParams_temp->pFileOut = M4OSA_NULL;
-                    }
-                    /* Chain previous element with next element = remove BGM chained
-                         list element */
-                    if( pParams_prev != M4OSA_NULL )
-                    {
-                        pParams_prev->pNext = pParams_temp->pNext;
-                    }
-                    /* If current pointer is the first of the chained list and next pointer of
-                    the chained list is NULL */
-                    /* it means that there was only one element in the list */
-                    /* => we put the context variable to NULL to reaffect the first chained list
-                     element */
-                    if( pParams_temp == xVSS_context->pMCSparamsList
-                        && pParams_temp->pNext == M4OSA_NULL )
-                    {
-                        xVSS_context->pMCSparamsList = M4OSA_NULL;
-                    }
-                    /* In that case, BGM pointer is the first one, but there are others elements
-                     after it */
-                    /* So, we need to change first chained list element */
-                    else if( pParams_temp->pNext != M4OSA_NULL
-                        && pParams_prev == M4OSA_NULL )
-                    {
-                        xVSS_context->pMCSparamsList = pParams_temp->pNext;
-                    }
-
-                    if( pParams_temp->pNext != M4OSA_NULL )
-                    {
-                        pParams_prev = pParams_temp->pNext;
-                        free(pParams_temp);
-                        pParams_temp = M4OSA_NULL;
-                        pParams_temp = pParams_prev;
-                    }
-                    else
-                    {
-                        free(pParams_temp);
-                        pParams_temp = M4OSA_NULL;
-                    }
-                }
-                else
-                {
-                    pParams_prev = pParams_temp;
-                    pParams_temp = pParams_temp->pNext;
-                }
-            }
-            /* We need to initialize the last element of the chained list to be able to add new
-             BGM element */
-            pMCS_last = pParams_prev;
-
-            if( xVSS_context->pMCSparamsList == M4OSA_NULL )
-            {
-                /* In that case, it means that there was only one element in the chained list */
-                /* So, we need to save the new params*/
-                xVSS_context->pMCSparamsList = pParams;
-            }
-            else
-            {
-                /* Update next pointer of the previous last element of the chain */
-                pMCS_last->pNext = pParams;
-            }
-
-        }
-
-        /* Fill the last M4xVSS_MCS_params element */
-        pParams->InputFileType =
-            xVSS_context->pSettings->xVSS.pBGMtrack->FileType;
-        pParams->OutputFileType = M4VIDEOEDITING_kFileType_PCM;
-        pParams->OutputVideoFormat = M4VIDEOEDITING_kNoneVideo;
-        pParams->OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;
-        pParams->OutputVideoFrameRate = M4VIDEOEDITING_k15_FPS;
-
-        if( xVSS_context->pSettings->xVSS.outputAudioFormat
-            == M4VIDEOEDITING_kAAC )
-        {
-            pParams->OutputAudioFormat = M4VIDEOEDITING_kAAC;
-            pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
-
-            /*FB: VAL CR P4ME00003076
-            The output audio bitrate in the AAC case is now directly given by the user*/
-            /*Check if the audio bitrate is correctly defined*/
-            /*Mono
-            MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
-            if( xVSS_context->pSettings->xVSS.outputAudioBitrate
-                >= M4VIDEOEDITING_k16_KBPS
-                && xVSS_context->pSettings->xVSS.outputAudioBitrate
-                <= M4VIDEOEDITING_k192_KBPS
-                && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
-            {
-                pParams->OutputAudioBitrate =
-                    xVSS_context->pSettings->xVSS.outputAudioBitrate;
-            }
-            /*Stereo
-            MCS values for AAC Mono are min: 32kbps and max: 192 kbps*/
-            else if( xVSS_context->pSettings->xVSS.outputAudioBitrate
-                >= M4VIDEOEDITING_k32_KBPS
-                && xVSS_context->pSettings->xVSS.outputAudioBitrate
-                <= M4VIDEOEDITING_k192_KBPS
-                && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_FALSE )
-            {
-                pParams->OutputAudioBitrate =
-                    xVSS_context->pSettings->xVSS.outputAudioBitrate;
-            }
-            else
-            {
-                pParams->OutputAudioBitrate = M4VIDEOEDITING_k32_KBPS;
-            }
-            pParams->bAudioMono = xVSS_context->pSettings->xVSS.bAudioMono;
-        }
-        else
-        {
-            pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
-            pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
-            pParams->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
-            pParams->bAudioMono = M4OSA_TRUE;
-        }
-        pParams->OutputVideoBitrate = M4VIDEOEDITING_kUndefinedBitrate;
-
-        /* Prepare output filename */
-        /* 21 is the size of "preview_16000_2.pcm" + \0 */
-        out_pcm =
-            (M4OSA_Char *)M4OSA_32bitAlignedMalloc(strlen(xVSS_context->pTempPath)
-            + 21, M4VS, (M4OSA_Char *)"Temp char* for pcmPreviewFile");
-
-        if( out_pcm == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
-            return M4ERR_ALLOC;
-        }
-
-        /* Copy temporary path to final preview path string */
-        M4OSA_chrNCopy(out_pcm, xVSS_context->pTempPath,
-            strlen(xVSS_context->pTempPath) + 1);
-
-        /* Depending of the output sample frequency and nb of channels, we construct preview
-        output filename */
-        if( xVSS_context->pSettings->xVSS.outputAudioFormat
-            == M4VIDEOEDITING_kAAC )
-        {
-            /* Construct output temporary PCM filename */
-            if( xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
-            {
-                strncat((char *)out_pcm, (const char *)"preview_16000_1.pcm\0",
-                    20);
-            }
-            else
-            {
-                strncat((char *)out_pcm, (const char *)"preview_16000_2.pcm\0",
-                    20);
-            }
-        }
-        else if( xVSS_context->pSettings->xVSS.outputAudioFormat
-            == M4VIDEOEDITING_kAMR_NB )
-        {
-            /* Construct output temporary PCM filename */
-            strncat((char *)out_pcm, (const char *)"preview_08000_1.pcm\0", 20);
-        }
-        else
-        {
-            if( out_pcm != M4OSA_NULL )
-            {
-                free(out_pcm);
-                out_pcm = M4OSA_NULL;
-            }
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Bad audio output format \n");
-            return M4ERR_PARAMETER;
-        }
-
-        xVSS_context->pcmPreviewFile = out_pcm;
-
-        /**
-        * UTF conversion: convert into the customer format, before being used*/
-        pDecodedPath = out_pcm;
-        length = strlen(pDecodedPath);
-
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)out_pcm, (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                    err);
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-        }
-
-        /**
-        * End of the UTF conversion, use the converted file path*/
-        xVSS_context->pcmPreviewFile =
-            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
-            (M4OSA_Char *)"pcmPreviewFile");
-
-        if( xVSS_context->pcmPreviewFile == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            free(out_pcm);
-            out_pcm = M4OSA_NULL;
-            /*FB: to avoid leaks when there is an error in the send command*/
-            /* Free Send command */
-            M4xVSS_freeCommand(xVSS_context);
-            /**/
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pcmPreviewFile, (void *)pDecodedPath, length + 1);
-
-        /* Free temporary output filename */
-        if( out_pcm != M4OSA_NULL )
-        {
-            free(out_pcm);
-            out_pcm = M4OSA_NULL;
-        }
-
-        pParams->pFileOut = M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-            (M4OSA_Char *)"MCS BGM Params: file out");
-
-        if( pParams->pFileOut == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            return M4ERR_ALLOC;
-        }
-        pParams->pFileTemp = M4OSA_NULL;
-
-        memcpy((void *)pParams->pFileOut,(void *) xVSS_context->pcmPreviewFile,
-            (length + 1)); /* Copy output file path */
-
-        /**
-        * UTF conversion: convert into the customer format, before being used*/
-
-        pDecodedPath = xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
-        length = strlen(pDecodedPath);
-
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)xVSS_context->pSettings->xVSS.pBGMtrack->
-                pFile, (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                    err);
-                /* Free Send command */
-                M4xVSS_freeCommand(xVSS_context);
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-        }
-
-        /**
-        * End of the UTF conversion, use the converted file path*/
-        pParams->pFileIn = (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
-            (M4OSA_Char *)"MCS BGM Params: file in");
-
-        if( pParams->pFileIn == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)pParams->pFileIn, (void *)pDecodedPath,
-            (length + 1)); /* Copy input file path */
-
-        pParams->isBGM = M4OSA_TRUE;
-        pParams->isCreated = M4OSA_FALSE;
-        xVSS_context->nbStepTotal++;
-        bIsTranscoding = M4OSA_TRUE;
-#endif /* PREVIEW_ENABLED */
-
-    }
-    else if( pSettings->xVSS.pBGMtrack != M4OSA_NULL
-        && isNewBGM == M4OSA_FALSE )
-    {
-#ifdef PREVIEW_ENABLED
-        /* BGM is the same as previously, no need to redecode audio */
-        /* Need to update MCS params chained list, to signal M4xVSS_step function to skip
-        BGM decoding */
-
-        M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
-        M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
-
-#endif /* PREVIEW_ENABLED */
-        /* We save output file pointer, because we will need to use it when saving audio
-         mixed file (last save step) */
-
-        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
-        xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
-
-        /* Re-write BGM settings in case they have changed between two sendCommand */
-        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddCts =
-            pSettings->xVSS.pBGMtrack->uiAddCts;
-        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume =
-            pSettings->xVSS.pBGMtrack->uiAddVolume;
-        xVSS_context->pSettings->xVSS.pBGMtrack->uiBeginLoop =
-            pSettings->xVSS.pBGMtrack->uiBeginLoop;
-        xVSS_context->pSettings->xVSS.pBGMtrack->uiEndLoop =
-            pSettings->xVSS.pBGMtrack->uiEndLoop;
-
-#ifdef PREVIEW_ENABLED
-        /* Parse MCS params chained list to find and delete BGM element */
-
-        while( pParams_temp != M4OSA_NULL )
-        {
-            if( pParams_temp->isBGM == M4OSA_TRUE )
-            {
-                pParams_temp->isCreated = M4OSA_TRUE;
-                break;
-            }
-            pParams_prev = pParams_temp;
-            pParams_temp = pParams_temp->pNext;
-        }
-
-#endif /* PREVIEW_ENABLED */
-
-        M4OSA_TRACE2_0("M4xVSS_SendCommand has been recalled, BGM is the same");
-    }
-    else
-    {
-        M4OSA_TRACE1_0("No BGM in this xVSS command");
-
-        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-        {
-#ifdef PREVIEW_ENABLED
-            /* Need to remove MCS previous params chained list */
-
-            M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
-            M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
-
-            /* Parse MCS params chained list to find and delete BGM element */
-            while( pParams_temp != M4OSA_NULL )
-            {
-                if( pParams_temp->isBGM == M4OSA_TRUE )
-                {
-                    /* Remove this element */
-                    if( pParams_temp->pFileIn != M4OSA_NULL )
-                    {
-                        free(pParams_temp->pFileIn);
-                        pParams_temp->pFileIn = M4OSA_NULL;
-                    }
-
-                    if( pParams_temp->pFileOut != M4OSA_NULL )
-                    {
-                        free(pParams_temp->pFileOut);
-                        pParams_temp->pFileOut = M4OSA_NULL;
-                    }
-                    /* Chain previous element with next element */
-                    if( pParams_prev != M4OSA_NULL )
-                    {
-                        pParams_prev->pNext = pParams_temp->pNext;
-                    }
-                    /* If current pointer is the first of the chained list and next pointer
-                     of the chained list is NULL */
-                    /* it means that there was only one element in the list */
-                    /* => we put the context variable to NULL */
-                    if( pParams_temp == xVSS_context->pMCSparamsList
-                        && pParams_temp->pNext == M4OSA_NULL )
-                    {
-                        free(pParams_temp);
-                        xVSS_context->pMCSparamsList = M4OSA_NULL;
-                    }
-                    /* In that case, BGM pointer is the first one, but there are others
-                     elements after it */
-                    /* So, we need to change first chained list element */
-                    else if( pParams_temp->pNext != M4OSA_NULL )
-                    {
-                        xVSS_context->pMCSparamsList = pParams_temp->pNext;
-                        free(pParams_temp);
-                        pParams_temp = M4OSA_NULL;
-                    }
-                    /* In all other cases, nothing else to do except freeing the chained
-                    list element */
-                    else
-                    {
-                        free(pParams_temp);
-                        pParams_temp = M4OSA_NULL;
-                    }
-                    break;
-                }
-                pParams_prev = pParams_temp;
-                pParams_temp = pParams_temp->pNext;
-            }
-
-#endif /* PREVIEW_ENABLED */
-            /* Here, we unallocate all BGM components and put xVSS_context->pSettings->
-            xVSS.pBGMtrack to NULL */
-
-            if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-            {
-                if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile
-                    != M4OSA_NULL )
-                {
-                    free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
-                    xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
-                }
-                free(xVSS_context->pSettings->xVSS.pBGMtrack);
-                xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
-            }
-        }
-    }
-
-    /* Changed to be able to mix with video only files -> in case no master clip is found
-    (i.e only JPG input or video only input) */
-    /* and if there is a BGM, we force the added volume to 100 (i.e replace audio) */
-
-    if( masterClip == -1
-        && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-    {
-        /* In that case, it means that no input 3GP file has a video track.
-        Therefore, if a mixing is asked, it will fail. Thus, we force replace audio. */
-        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume = 100;
-    }
-
-    /* Save clip number to know if a M4xVSS_sendCommand has already been called */
-    xVSS_context->previousClipNumber = xVSS_context->pSettings->uiClipNumber;
-
-    /* Change state */
-    xVSS_context->m_state = M4xVSS_kStateAnalyzing;
-
-    /* In case of MMS use case, we compute here the max video bitrate */
-    /* In case of too low bitrate, a specific warning is returned */
-    if( xVSS_context->pSettings->xVSS.outputFileSize != 0 && totalDuration > 0 )
-    {
-        M4OSA_UInt32 targetedBitrate = 0;
-        M4VIDEOEDITING_ClipProperties fileProperties;
-        M4OSA_Double ratio;
-
-        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-        {
-            if( xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume
-                == 100 ) /* We are in "replace audio mode, need to check the filetype */
-            {
-                if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
-                    == M4VIDEOEDITING_kFileType_3GPP )
-                {
-                    M4OSA_Void *pDecodedPath;
-                    /**
-                    * UTF conversion: convert into the customer format, before being used*/
-                    pDecodedPath =
-                        xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
-                    length = strlen(pDecodedPath);
-
-                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                        != M4OSA_NULL && xVSS_context->
-                        UTFConversionContext.pTempOutConversionBuffer
-                        != M4OSA_NULL )
-                    {
-                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                            (M4OSA_Void *)xVSS_context->pSettings->
-                            xVSS.pBGMtrack->pFile,
-                            (M4OSA_Void *)xVSS_context->
-                            UTFConversionContext.
-                            pTempOutConversionBuffer, &length);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1("M4xVSS_SendCommand: \
-                                M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                                err);
-                            /* Free Send command */
-                            M4xVSS_freeCommand(xVSS_context);
-                            return err;
-                        }
-                        pDecodedPath = xVSS_context->
-                            UTFConversionContext.pTempOutConversionBuffer;
-                    }
-
-                    /**
-                    * End of the UTF conversion, use the converted file path*/
-                    err =
-                        M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
-                        &fileProperties);
-
-                    /* Get the properties of the BGM track */
-                    /*err = M4xVSS_internalGetProperties(xVSS_context, xVSS_context->pSettings->
-                    xVSS.pBGMtrack->pFile, &fileProperties);*/
-                    if( err != M4NO_ERROR )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned an error:\
-                             0x%x", err);
-                        return err;
-                    }
-
-                    if( fileProperties.AudioStreamType
-                        != M4VIDEOEDITING_kAMR_NB )
-                    {
-                        M4OSA_TRACE1_0(
-                            "M4xVSS_sendCommand: Impossible to use MMS mode with BGM != AMR-NB");
-                        return M4ERR_PARAMETER;
-                    }
-                }
-                else if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
-                    != M4VIDEOEDITING_kFileType_AMR
-                    && xVSS_context->pSettings->xVSS.pBGMtrack->FileType
-                    != M4VIDEOEDITING_kFileType_MP3 )
-                {
-                    M4OSA_TRACE1_0("M4xVSS_sendCommand: Bad input BGM file");
-                    return M4ERR_PARAMETER;
-                }
-            }
-        }
-
-        /* Compute targeted bitrate, with 8% margin (moov) */
-        if( totalDuration > 1000 )
-        {
-            targetedBitrate =
-                (M4OSA_UInt32)(( xVSS_context->pSettings->xVSS.outputFileSize
-                * 8 * 0.84) / (totalDuration / 1000));
-        }
-        else
-        {
-            targetedBitrate = 0;
-        }
-
-        /* Remove audio bitrate */
-        if( targetedBitrate >= 12200 )
-        {
-            targetedBitrate -= 12200; /* Only AMR is supported in MMS case */
-        }
-        else
-        {
-            targetedBitrate = 0;
-        }
-
-        /* Compute an indicator of "complexity" depending on nb of sequences and total duration */
-        /* The highest is the number of sequences, the more there are some I frames */
-        /* In that case, it is necessary to reduce the target bitrate */
-        ratio =
-            (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
-            * 100000) / (M4OSA_Double)(totalDuration));
-        M4OSA_TRACE2_3(
-            "Ratio clip_nb/duration = %f\nTargeted bitrate = %d\nTotal duration: %d",
-            (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
-            * 100000) / (M4OSA_Double)(totalDuration)),
-            targetedBitrate, totalDuration);
-
-        if( ratio > 50 && ratio <= 75 )
-        {
-            /* It means that there is a potential risk of having a higher file size
-            than specified */
-            targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.1);
-            M4OSA_TRACE2_2(
-                "New bitrate1 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
-                ratio, targetedBitrate);
-        }
-        else if( ratio > 75 )
-        {
-            targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.15);
-            M4OSA_TRACE2_2(
-                "New bitrate2 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
-                ratio, targetedBitrate);
-        }
-
-        /*CR 3283 MMS use case for VAL:
-        Decrease the output file size to keep a margin of 5%
-        The writer will stop when the targeted output file size will be reached*/
-        xVSS_context->pSettings->xVSS.outputFileSize -=
-            (M4OSA_UInt32)(xVSS_context->pSettings->xVSS.outputFileSize * 0.05);
-
-        switch( xVSS_context->pSettings->xVSS.outputVideoSize )
-        {
-            case M4VIDEOEDITING_kSQCIF:
-                if( targetedBitrate < 32000 )
-                {
-                    xVSS_context->targetedBitrate = 32000;
-                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
-                }
-                break;
-
-            case M4VIDEOEDITING_kQQVGA:
-                if( targetedBitrate < 32000 )              /*48000)*/
-                {
-                    xVSS_context->targetedBitrate = 32000; /*48000;*/
-                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
-                }
-                break;
-
-            case M4VIDEOEDITING_kQCIF:
-                if( targetedBitrate < 48000 )              /*64000)*/
-                {
-                    xVSS_context->targetedBitrate = 48000; /*64000;*/
-                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
-                }
-                break;
-
-            case M4VIDEOEDITING_kQVGA:
-                if( targetedBitrate < 64000 )              /*128000)*/
-                {
-                    xVSS_context->targetedBitrate = 64000; /*128000;*/
-                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
-                }
-                break;
-
-            case M4VIDEOEDITING_kCIF:
-                if( targetedBitrate < 128000 )
-                {
-                    xVSS_context->targetedBitrate = 128000;
-                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
-                }
-                break;
-
-            case M4VIDEOEDITING_kVGA:
-                if( targetedBitrate < 192000 )
-                {
-                    xVSS_context->targetedBitrate = 192000;
-                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
-                }
-                break;
-
-            default:
-                /* Cannot happen */
-                M4OSA_TRACE1_0(
-                    "M4xVSS_sendCommand: Error in output fileSize !");
-                return M4ERR_PARAMETER;
-                break;
-        }
-        xVSS_context->targetedBitrate = (M4OSA_UInt32)targetedBitrate;
-    }
-
-    if( bIsTranscoding )
-    {
-        return M4VSS3GPP_WAR_TRANSCODING_NECESSARY;
-    }
-    else
-    {
-        return M4NO_ERROR;
-    }
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Char* pFilePath)
- * @brief        This function prepare the save
- * @note        The xVSS create 3GP edited final file
- *                This function must be called once M4xVSS_Step has returned
- *                M4VSS3GPP_WAR_ANALYZING_DONE
- *                After this function, the user must call M4xVSS_Step until
- *                it returns another error than M4NO_ERROR.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    pFilePath            (IN) If the user wants to provide a different
- *                                output filename, else can be NULL (allocated by the user)
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_SaveStart( M4OSA_Context pContext, M4OSA_Void *pFilePath,
-                           M4OSA_UInt32 filePathSize )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_ERR err;
-
-    /*Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
-    M4VSS3GPP_EditSettings *pEditSavingSettings = M4OSA_NULL;
-    M4OSA_UInt8 i, j;
-    M4OSA_UInt32 offset = 0;
-    M4OSA_UInt8 nbEffects = 0;
-    /*only for UTF conversion support*/
-    M4OSA_Void *pDecodedPath = M4OSA_NULL;
-    M4OSA_UInt32 length = 0;
-    /**/
-
-    /* Check state */
-    if( xVSS_context->m_state != M4xVSS_kStateOpened )
-    {
-        M4OSA_TRACE1_1(
-            "Bad state when calling M4xVSS_SaveStart function! State is %d",
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    /* RC: to temporary handle changing of output filepath */
-    /* TO BE CHANGED CLEANLY WITH A MALLOC/MEMCPY !!!! */
-    if( pFilePath != M4OSA_NULL )
-    {
-        if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
-        {
-            /*it means that pOutputFile has been allocated in M4xVSS_sendCommand()*/
-            free(xVSS_context->pSettings->pOutputFile);
-            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
-            xVSS_context->pSettings->uiOutputPathSize = 0;
-        }
-
-        pDecodedPath = pFilePath;
-        /*As all inputs of the xVSS are in UTF8, convert the output file path into the customer
-         format*/
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)pFilePath, (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                    err);
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-            filePathSize = length;
-        }
-
-        xVSS_context->pOutputFile =
-            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(filePathSize + 1, M4VS,
-            (M4OSA_Char *)"M4xVSS_SaveStart: output file");
-
-        if( xVSS_context->pOutputFile == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pOutputFile, (void *)pDecodedPath, filePathSize + 1);
-        xVSS_context->pOutputFile[filePathSize] = '\0';
-        xVSS_context->pSettings->pOutputFile = xVSS_context->pOutputFile;
-        xVSS_context->pSettings->uiOutputPathSize = filePathSize;
-    }
-
-    /**
-    ***/
-
-    /*FB: Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
-    /*It is the same principle as in the PreviewStart()*/
-    pEditSavingSettings =
-        (M4VSS3GPP_EditSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_EditSettings),
-        M4VS, (M4OSA_Char *)"Saving, copy of VSS structure");
-
-    if( pEditSavingSettings == M4OSA_NULL )
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-        if( xVSS_context->pOutputFile != M4OSA_NULL )
-        {
-            free(xVSS_context->pOutputFile);
-            xVSS_context->pOutputFile = M4OSA_NULL;
-        }
-        return M4ERR_ALLOC;
-    }
-
-    /* Copy settings from input structure */
-    memcpy((void *) &(pEditSavingSettings->xVSS),
-        (void *) &(xVSS_context->pSettings->xVSS),
-        sizeof(M4xVSS_EditSettings));
-
-    /* Initialize pEditSavingSettings structure */
-    pEditSavingSettings->xVSS.pBGMtrack = M4OSA_NULL;
-
-    pEditSavingSettings->videoFrameRate =
-        xVSS_context->pSettings->videoFrameRate;
-    pEditSavingSettings->uiClipNumber = xVSS_context->pSettings->uiClipNumber;
-    pEditSavingSettings->uiMasterClip =
-        xVSS_context->pSettings->uiMasterClip; /* VSS2.0 mandatory parameter */
-
-    /* Allocate savingSettings.pClipList/pTransitions structure */
-    pEditSavingSettings->pClipList = (M4VSS3GPP_ClipSettings *
-        * )M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipSettings *)
-        *pEditSavingSettings->uiClipNumber,
-        M4VS, (M4OSA_Char *)"xVSS, saving , copy of pClipList");
-
-    if( pEditSavingSettings->pClipList == M4OSA_NULL )
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-        if( xVSS_context->pOutputFile != M4OSA_NULL )
-        {
-            free(xVSS_context->pOutputFile);
-            xVSS_context->pOutputFile = M4OSA_NULL;
-        }
-        return M4ERR_ALLOC;
-    }
-
-    if( pEditSavingSettings->uiClipNumber > 1 )
-    {
-        pEditSavingSettings->pTransitionList = (M4VSS3GPP_TransitionSettings *
-            * )M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings *)
-            *(pEditSavingSettings->uiClipNumber - 1),
-            M4VS, (M4OSA_Char *)"xVSS, saving, copy of pTransitionList");
-
-        if( pEditSavingSettings->pTransitionList == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-    }
-    else
-    {
-        pEditSavingSettings->pTransitionList = M4OSA_NULL;
-    }
-
-    for ( i = 0; i < pEditSavingSettings->uiClipNumber; i++ )
-    {
-        pEditSavingSettings->pClipList[i] = (M4VSS3GPP_ClipSettings
-            *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipSettings),
-            M4VS, (M4OSA_Char *)"saving clip settings");
-
-        if( pEditSavingSettings->pClipList[i] == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-
-        if( i < pEditSavingSettings->uiClipNumber
-            - 1 ) /* Because there is 1 less transition than clip number */
-        {
-            pEditSavingSettings->pTransitionList[i] =
-                (M4VSS3GPP_TransitionSettings
-                *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings),
-                M4VS, (M4OSA_Char *)"saving transition settings");
-
-            if( pEditSavingSettings->pTransitionList[i] == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-                if( xVSS_context->pOutputFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pOutputFile);
-                    xVSS_context->pOutputFile = M4OSA_NULL;
-                }
-                return M4ERR_ALLOC;
-            }
-        }
-    }
-
-    for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
-    {
-        // Add MP4 file support
-
-        if( ( xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_3GPP)
-            || (xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_MP4)
-            || (xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_M4V)
-            || (xVSS_context->pSettings->pClipList[i]->FileType
-            == M4VIDEOEDITING_kFileType_ARGB8888))
-
-        {
-            /* Copy data from given structure to our saving structure */
-            M4xVSS_DuplicateClipSettings(pEditSavingSettings->pClipList[i],
-                xVSS_context->pSettings->pClipList[i],
-                M4OSA_FALSE /* remove effects */);
-
-            /**
-            * UTF conversion: convert into the customer format, before being used*/
-            pDecodedPath = pEditSavingSettings->pClipList[i]->pFile;
-            length = strlen(pDecodedPath);
-
-            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
-                != M4OSA_NULL && xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer
-                != M4OSA_NULL )
-            {
-                err =
-                    M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
-                    *)pEditSavingSettings->pClipList[i]->pFile,
-                    (M4OSA_Void *)xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer,
-                    &length);
-
-                if( err != M4NO_ERROR )
-                {
-                    M4OSA_TRACE1_1(
-                        "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                        err);
-
-                    if( xVSS_context->pOutputFile != M4OSA_NULL )
-                    {
-                        free(xVSS_context->pOutputFile);
-                        xVSS_context->pOutputFile = M4OSA_NULL;
-                    }
-                    return err;
-                }
-                pDecodedPath = xVSS_context->
-                    UTFConversionContext.pTempOutConversionBuffer;
-
-                /**
-                * End of the UTF conversion, use the converted file path*/
-                free(
-                    pEditSavingSettings->pClipList[i]->pFile);
-                pEditSavingSettings->pClipList[i]->pFile = (M4OSA_Void
-                    *)M4OSA_32bitAlignedMalloc((length + 1),
-                    M4VS, (M4OSA_Char *)"saving transition settings");
-
-                if( pEditSavingSettings->pClipList[i]->pFile == M4OSA_NULL )
-                {
-                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-                    if( xVSS_context->pOutputFile != M4OSA_NULL )
-                    {
-                        free(xVSS_context->pOutputFile);
-                        xVSS_context->pOutputFile = M4OSA_NULL;
-                    }
-                    return M4ERR_ALLOC;
-                }
-                memcpy((void *)pEditSavingSettings->pClipList[i]->pFile,
-                    (void *)pDecodedPath, length + 1);
-            }
-            /*FB: add file path size because of UTF 16 conversion*/
-            pEditSavingSettings->pClipList[i]->filePathSize = length+1;
-
-            if( i
-                < xVSS_context->pSettings->uiClipNumber
-                - 1 ) /* Because there is 1 less transition than clip number */
-            {
-                memcpy(
-                    (void *)pEditSavingSettings->pTransitionList[i],
-                    (void *)xVSS_context->pSettings->
-                    pTransitionList[i],
-                    sizeof(M4VSS3GPP_TransitionSettings));
-            }
-        }
-        else
-        {
-            M4OSA_TRACE1_0(
-                "M4xVSS_SaveStart: Error when parsing xVSS_context->pSettings->pClipList[i]:\
-                 Bad file type");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_PARAMETER;
-        }
-    }
-
-    /* Count the number of video effects, used to know how much memory is needed to allocate*/
-    /* FB 2008/10/15: removed : not compatible with M4VSS3GPP_kVideoEffectType_None
-    for(j=0;j<xVSS_context->pSettings->nbEffects;j++)
-    {
-    if(xVSS_context->pSettings->Effects[j].VideoEffectType != M4VSS3GPP_kVideoEffectType_None)
-    {
-    nbEffects++;
-    }
-    }*/
-    nbEffects = xVSS_context->pSettings->nbEffects;
-
-    /* Allocate effects saving structure with correct number of effects */
-    if( nbEffects != 0 )
-    {
-        pEditSavingSettings->Effects =
-            (M4VSS3GPP_EffectSettings *)M4OSA_32bitAlignedMalloc(nbEffects
-            * sizeof(M4VSS3GPP_EffectSettings), M4VS, (M4OSA_Char
-            *)"Saving settings, effects table of structure settings");
-
-        if( pEditSavingSettings->Effects == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-
-        /* Just copy effect structure to saving structure, as effects time are now */
-        /* relative to output clip time*/
-        memcpy((void *)pEditSavingSettings->Effects,
-            (void *)xVSS_context->pSettings->Effects,
-            nbEffects * sizeof(M4VSS3GPP_EffectSettings));
-    }
-    else
-    {
-        pEditSavingSettings->Effects = M4OSA_NULL;
-        pEditSavingSettings->nbEffects = 0;
-    }
-    pEditSavingSettings->nbEffects = nbEffects;
-
-    if( pFilePath != M4OSA_NULL )
-    {
-        pEditSavingSettings->pOutputFile = pFilePath;
-    }
-
-    /* Save pointer of saving video editor to use in step function */
-    xVSS_context->pCurrentEditSettings = pEditSavingSettings;
-
-    /* Change output file name to temporary output file name, because final file will be
-     generated by audio mixer */
-    if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-    {
-
-        M4OSA_Char out_3gp[M4XVSS_MAX_PATH_LEN];
-        M4OSA_Char out_3gp_tmp[M4XVSS_MAX_PATH_LEN];
-
-        /**/
-        pEditSavingSettings->xVSS.pBGMtrack =
-            (M4xVSS_BGMSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_BGMSettings), M4VS,
-            (M4OSA_Char
-            *)"Saving settings, effects table of structure settings");
-
-        if( pEditSavingSettings->xVSS.pBGMtrack == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-
-        /* Just copy effect structure to saving structure, as effects time are now */
-        /* relative to output clip time*/
-        memcpy((void *)pEditSavingSettings->xVSS.pBGMtrack,
-            (void *)xVSS_context->pSettings->xVSS.pBGMtrack,
-            sizeof(M4xVSS_BGMSettings));
-
-        /* Allocate file name, and copy file name buffer to our structure */
-        pEditSavingSettings->xVSS.pBGMtrack->pFile = M4OSA_32bitAlignedMalloc(
-            (strlen(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
-            + 1),
-            M4VS, (M4OSA_Char *)"Saving struct xVSS BGM file path");
-
-        if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
-        {
-            M4xVSS_freeCommand(xVSS_context);
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
-            (void *)xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
-            strlen(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
-            + 1);
-
-        /*Copy BGM track file path*/
-
-        /**
-        * UTF conversion*/
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
-                (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                    err);
-
-                if( xVSS_context->pOutputFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pOutputFile);
-                    xVSS_context->pOutputFile = M4OSA_NULL;
-                }
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-
-            free(pEditSavingSettings->xVSS.pBGMtrack->pFile);
-            pEditSavingSettings->xVSS.pBGMtrack->pFile =
-                (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS, (M4OSA_Char
-                *)"M4xVSS_SaveStart: Temp filename in case of BGM");
-
-            if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-                if( xVSS_context->pOutputFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pOutputFile);
-                    xVSS_context->pOutputFile = M4OSA_NULL;
-                }
-                return M4ERR_ALLOC;
-            }
-            memcpy((void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
-                (void *)pDecodedPath, length + 1);
-        }
-
-        /**/
-
-        M4OSA_chrNCopy(out_3gp, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
-        M4OSA_chrNCopy(out_3gp_tmp, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
-
-        /* Construct output temporary 3GP filename */
-        strncat((char *)out_3gp, (const char *)"savetemp.3gp\0", 13);
-        strncat((char *)out_3gp_tmp, (const char *)"savetemp.tmp\0", 13);
-
-        /**
-        * UTF conversion: convert into the customer format, before being used*/
-        pDecodedPath = out_3gp;
-        length = strlen(pDecodedPath);
-
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                    err);
-
-                if( xVSS_context->pOutputFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pOutputFile);
-                    xVSS_context->pOutputFile = M4OSA_NULL;
-                }
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-        }
-
-        /**
-        * End of the UTF conversion, use the converted file path*/
-        xVSS_context->pCurrentEditSettings->pOutputFile =
-            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
-            (M4OSA_Char *)"M4xVSS_SaveStart: Temp filename in case of BGM");
-
-        if( xVSS_context->pCurrentEditSettings->pOutputFile == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pCurrentEditSettings->pOutputFile,
-            (void *)pDecodedPath, length + 1);
-        xVSS_context->pCurrentEditSettings->uiOutputPathSize = length + 1;
-
-        /**
-        * UTF conversion: convert into the customer format, before being used*/
-        pDecodedPath = out_3gp_tmp;
-        length = strlen(pDecodedPath);
-
-        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-            != M4OSA_NULL )
-        {
-            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                (M4OSA_Void *)out_3gp_tmp, (M4OSA_Void *)xVSS_context->
-                UTFConversionContext.pTempOutConversionBuffer, &length);
-
-            if( err != M4NO_ERROR )
-            {
-                M4OSA_TRACE1_1(
-                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
-                    err);
-
-                if( xVSS_context->pOutputFile != M4OSA_NULL )
-                {
-                    free(xVSS_context->pOutputFile);
-                    xVSS_context->pOutputFile = M4OSA_NULL;
-                }
-                return err;
-            }
-            pDecodedPath =
-                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-        }
-
-        /**
-        * End of the UTF conversion, use the converted file path*/
-        xVSS_context->pCurrentEditSettings->pTemporaryFile =
-            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
-            (M4OSA_Char *)"M4xVSS_SaveStart: Temporary file");
-
-        if( xVSS_context->pCurrentEditSettings->pTemporaryFile == M4OSA_NULL )
-        {
-            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
-
-            if( xVSS_context->pOutputFile != M4OSA_NULL )
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            return M4ERR_ALLOC;
-        }
-        memcpy((void *)xVSS_context->pCurrentEditSettings->pTemporaryFile,
-            (void *)pDecodedPath, length + 1);
-
-        /* Put nb of step for progression monitoring to 2, because audio mixing is needed */
-        xVSS_context->nbStepTotal = 2;
-    }
-    else
-    {
-        xVSS_context->pCurrentEditSettings->pOutputFile =
-            xVSS_context->pOutputFile;
-        xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
-
-        /* Put nb of step for progression monitoring to 1, because no audio mixing is needed */
-        xVSS_context->nbStepTotal = 1;
-    }
-
-    /**
-    ***/
-
-    err = M4xVSS_internalGenerateEditedFile(xVSS_context);
-
-    if( err != M4NO_ERROR )
-    {
-        M4OSA_TRACE1_1(
-            "M4xVSS_SaveStart: M4xVSS_internalGenerateEditedFile returned an error: 0x%x",
-            err);
-
-        /**/
-        if( xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL
-            && xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
-        {
-            free(xVSS_context->pCurrentEditSettings->
-                pOutputFile);
-            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
-            xVSS_context->pOutputFile = M4OSA_NULL;
-        }
-
-        if( xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL
-            && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
-        {
-            free(xVSS_context->pCurrentEditSettings->
-                pTemporaryFile);
-            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
-        }
-
-        if( xVSS_context->pOutputFile != M4OSA_NULL )
-        {
-            free(xVSS_context->pOutputFile);
-            xVSS_context->pOutputFile = M4OSA_NULL;
-        }
-        /* TODO: Translate error code of VSS to an xVSS error code */
-        return err;
-    }
-
-    /* Reinitialize current step number for progression monitoring */
-    xVSS_context->currentStep = 0;
-
-    /* Change xVSS state */
-    xVSS_context->m_state = M4xVSS_kStateSaving;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext)
- * @brief        This function unallocate save ressources and change xVSS
- *                internal state.
- * @note        This function must be called once M4xVSS_Step has returned
- *                M4VSS3GPP_WAR_SAVING_DONE
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_SaveStop( M4OSA_Context pContext )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check state */
-    if( xVSS_context->m_state != M4xVSS_kStateSaving )
-    {
-        M4OSA_TRACE1_1(
-            "Bad state when calling M4xVSS_SaveStop function! State is %d",
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    /* Free saving structures */
-    M4xVSS_internalFreeSaving(xVSS_context);
-
-    if( xVSS_context->pOutputFile != M4OSA_NULL )
-    {
-        free(xVSS_context->pOutputFile);
-        xVSS_context->pOutputFile = M4OSA_NULL;
-    }
-
-    /* Change xVSS state */
-    xVSS_context->m_state = M4xVSS_kStateSaved;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress)
- * @brief        This function executes differents tasks, depending of xVSS
- *                internal state.
- * @note        This function:
- *                    - analyses editing structure if called after M4xVSS_SendCommand
- *                    - generates preview file if called after M4xVSS_PreviewStart
- *                    - generates final edited file if called after M4xVSS_SaveStart
- *
- * @param    pContext                        (IN) Pointer on the xVSS edit context
- * @param    pProgress                        (IN/OUT) Pointer on an integer giving a
- *                                            progress indication (between 0-100)
- * @return    M4NO_ERROR:                        No error, the user must call M4xVSS_Step again
- * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:                    This function cannot not be called at this time
- * @return    M4VSS3GPP_WAR_PREVIEW_READY:    Preview file is generated
- * @return    M4VSS3GPP_WAR_SAVING_DONE:        Final edited file is generated
- * @return    M4VSS3GPP_WAR_ANALYZING_DONE:    Analyse is done
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_Step( M4OSA_Context pContext, M4OSA_UInt8 *pProgress )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
-    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt =
-        xVSS_context->pAudioMixContext;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt8 uiProgress = 0;
-
-    switch( xVSS_context->m_state )
-    {
-        case M4xVSS_kStateSaving:
-        //case M4xVSS_kStateGeneratingPreview:
-            {
-                if( xVSS_context->editingStep
-                    == M4xVSS_kMicroStateEditing ) /* VSS -> creating effects, transitions ... */
-                {
-                    /* RC: to delete unecessary temp files on the fly */
-                    M4VSS3GPP_InternalEditContext *pVSSContext =
-                        (M4VSS3GPP_InternalEditContext *)pVssCtxt;
-
-                    err = M4VSS3GPP_editStep(pVssCtxt, &uiProgress);
-
-                    if( ( err != M4NO_ERROR) && (err != M4VSS3GPP_WAR_EDITING_DONE)
-                        && (err != M4VSS3GPP_WAR_SWITCH_CLIP) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_Step: M4VSS3GPP_editStep returned 0x%x\n", err);
-                        M4VSS3GPP_editCleanUp(pVssCtxt);
-                        /* TODO ? : Translate error code of VSS to an xVSS error code ? */
-                        xVSS_context->pCurrentEditContext = M4OSA_NULL;
-                        return err;
-                    }
-
-                    /* RC: to delete unecessary temp files on the fly */
-                    if( err == M4VSS3GPP_WAR_SWITCH_CLIP )
-                    {
-#ifndef DO_NOT_REMOVE_TEMP_FILES
-                        /* It means we can delete the temporary file */
-                        /* First step, check the temp file is not use somewhere else after */
-
-                        M4OSA_UInt32 i;
-                        M4OSA_Int32 cmpResult = -1;
-
-                        for ( i = pVSSContext->uiCurrentClip;
-                            i < pVSSContext->uiClipNumber; i++ )
-                        {
-                            if( pVSSContext->pClipList[pVSSContext->uiCurrentClip
-                                - 1].filePathSize
-                                == pVSSContext->pClipList[i].filePathSize )
-                            {
-                                cmpResult = memcmp((void *)pVSSContext->
-                                    pClipList[pVSSContext->uiCurrentClip
-                                    - 1].pFile, (void *)pVSSContext->pClipList[i].pFile,
-                                    pVSSContext->
-                                    pClipList[pVSSContext->uiCurrentClip
-                                    - 1].filePathSize);
-
-                                if( cmpResult == 0 )
-                                {
-                                    /* It means we found a corresponding file, we do not delete
-                                    this temporary file */
-                                    break;
-                                }
-                            }
-                        }
-
-                        if( cmpResult != 0 )
-                        {
-                            M4OSA_UInt32 ConvertedSize = 0;
-                            M4OSA_Char *toto;
-                            M4OSA_Char *pTmpStr;
-
-                            /* Convert result in UTF8 to check if we can delete it or not */
-                            if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
-                                != M4OSA_NULL && xVSS_context->
-                                UTFConversionContext.
-                                pTempOutConversionBuffer != M4OSA_NULL )
-                            {
-                                M4xVSS_internalConvertToUTF8(xVSS_context,
-                                    (M4OSA_Void *)pVSSContext->
-                                    pClipList[pVSSContext->uiCurrentClip
-                                    - 1].pFile, (M4OSA_Void *)xVSS_context->
-                                    UTFConversionContext.
-                                    pTempOutConversionBuffer, &ConvertedSize);
-                                toto = (M4OSA_Char *)strstr((const char *)xVSS_context->
-                                    UTFConversionContext.
-                                    pTempOutConversionBuffer,
-                                    (const char *)xVSS_context->pTempPath);
-                                pTmpStr =
-                                    xVSS_context->UTFConversionContext.
-                                    pTempOutConversionBuffer;
-                            }
-                            else
-                            {
-                                toto = (M4OSA_Char *)strstr((const char *)pVSSContext->
-                                    pClipList[pVSSContext->uiCurrentClip
-                                    - 1].pFile, (const char *)xVSS_context->pTempPath);
-                                pTmpStr = pVSSContext->
-                                    pClipList[pVSSContext->uiCurrentClip
-                                    - 1].pFile;
-                            }
-
-                            if( toto != M4OSA_NULL )
-                            {
-                                /* As temporary files can be imgXXX.3gp or vidXXX.3gp */
-                                pTmpStr +=
-                                    (strlen((const char *)pTmpStr)
-                                    - 10); /* Because temporary files have a length at most of
-                                    10 bytes */
-                                toto = (M4OSA_Char *)strstr((const char *)pTmpStr,
-                                    (const char *)"img");
-
-                                if( toto != M4OSA_NULL )
-                                {
-                                    toto = (M4OSA_Char *)strstr((const char *)pTmpStr,
-                                        (const char *)"vid");
-                                }
-
-                                if( err
-                                    == M4NO_ERROR ) /* It means the file is a temporary file, we
-                                    can delete it */
-                                {
-                                    remove((const char *)pVSSContext->
-                                        pClipList[pVSSContext->uiCurrentClip
-                                        - 1].pFile);
-                                }
-                            }
-                        }
-
-#endif /* DO_NOT_REMOVE_TEMP_FILES*/
-                        /* */
-
-                        err = M4NO_ERROR;
-                    }
-
-                    if( err == M4VSS3GPP_WAR_EDITING_DONE )
-                    {
-                        xVSS_context->currentStep++;
-                        /* P4ME00003276: When a step is complete, increment currentStep and reset
-                        uiProgress unless progress would be wrong */
-                        uiProgress = 0;
-                        err = M4xVSS_internalCloseEditedFile(xVSS_context);
-                        /* Fix for  blrnxpsw#234---> */
-                        if( err != M4NO_ERROR )
-                        {
-                            if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
-                            {
-                                err = M4xVSSERR_NO_MORE_SPACE;
-                            }
-                            M4OSA_TRACE1_1(
-                                "M4xVSS_internalCloseEditedFile returned an error: 0x%x",
-                                err);
-                            return err;
-                        }
-                        /*<---- Fix for  blrnxpsw#234 */
-                        if( xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack
-                            != M4OSA_NULL )
-                        {
-                            xVSS_context->editingStep =
-                                M4xVSS_kMicroStateAudioMixing;
-                            /* Open Audio mixing component */
-                            err = M4xVSS_internalGenerateAudioMixFile(xVSS_context);
-
-                            if( err != M4NO_ERROR )
-                            {
-                                M4OSA_TRACE1_1(
-                                    "M4xVSS_internalGenerateAudioMixFile returned an error: 0x%x",
-                                    err);
-                                /* TODO ? : Translate error code of VSS to an xVSS error code */
-                                return err;
-                            }
-                            err = M4NO_ERROR;
-                            goto end_step;
-                        }
-                        else
-                        {
-
-                            err = M4VSS3GPP_WAR_SAVING_DONE;
-                            goto end_step;
-
-                        }
-                    }
-                }
-                else if( xVSS_context->editingStep
-                    == M4xVSS_kMicroStateAudioMixing ) /* Audio mixing: mix/replace audio track
-                    with given BGM */
-                {
-                    err = M4VSS3GPP_audioMixingStep(pAudioMixingCtxt, &uiProgress);
-
-                    if( ( err != M4NO_ERROR)
-                        && (err != M4VSS3GPP_WAR_END_OF_AUDIO_MIXING) )
-                    {
-                        M4OSA_TRACE1_1(
-                            "M4VSS3GPP_audioMixingMain: M4VSS3GPP_audioMixingStep returned 0x%x\n",
-                            err);
-                        /* TODO ? : Translate error code of VSS to an xVSS error code */
-                        return err;
-                    }
-
-                    if( err == M4VSS3GPP_WAR_END_OF_AUDIO_MIXING )
-                    {
-                        xVSS_context->currentStep++;
-                        /* P4ME00003276: When a step is complete, increment currentStep and reset
-                        uiProgress unless progress would be wrong */
-                        uiProgress = 0;
-                        err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1(
-                                "M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x",
-                                err);
-                            /* TODO ? : Translate error code of VSS to an xVSS error code */
-                            return err;
-                        }
-
-                            err = M4VSS3GPP_WAR_SAVING_DONE;
-                            goto end_step;
-
-                    }
-                }
-                else
-                {
-                    M4OSA_TRACE1_0("Bad state in step function !");
-                    return M4ERR_STATE;
-                }
-            }
-            break;
-
-        case M4xVSS_kStateAnalyzing:
-            {
-                if( xVSS_context->analyseStep
-                    == M4xVSS_kMicroStateAnalysePto3GPP ) /* Pto3GPP, analysing input parameters */
-                {
-                    if( xVSS_context->pPTo3GPPcurrentParams == M4OSA_NULL
-                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
-                    {
-                        xVSS_context->pPTo3GPPcurrentParams =
-                            xVSS_context->
-                            pPTo3GPPparamsList; /* Current Pto3GPP Parameter is the first element
-                            of the list */
-                    }
-                    else if( xVSS_context->pPTo3GPPcurrentParams != M4OSA_NULL
-                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
-                    {
-                        xVSS_context->pPTo3GPPcurrentParams =
-                            xVSS_context->pPTo3GPPcurrentParams->
-                            pNext; /* Current Pto3GPP Parameter is the next element of the list */
-
-                        if( xVSS_context->pPTo3GPPcurrentParams
-                            == M4OSA_NULL ) /* It means there is no next image to convert */
-                        {
-                            /* We step to MCS phase */
-                            xVSS_context->analyseStep =
-                                M4xVSS_kMicroStateAnalyzeMCS;
-                            err = M4NO_ERROR;
-                            goto end_step;
-                        }
-                    }
-                    else if( xVSS_context->pPTo3GPPparamsList == M4OSA_NULL )
-                    {
-                        xVSS_context->analyseStep =
-                            M4xVSS_kMicroStateAnalyzeMCS; /* Change Analyzing micro state to
-                             MCS phase */
-                        err = M4NO_ERROR;
-                        goto end_step;
-                    }
-
-                    /* Check if this file has to be converted or not */
-                    /* If not, we just return M4NO_ERROR, and go to next file */
-                    if( xVSS_context->pPTo3GPPcurrentParams->isCreated
-                        == M4OSA_FALSE )
-                    {
-                        /* Opening Pto3GPP */
-                        err = M4xVSS_internalStartConvertPictureTo3gp(xVSS_context);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartConvertPictureTo3gp \
-                            returned error: 0x%x",
-                                err)
-                                /* TODO ? : Translate error code of VSS to an xVSS error code */
-                                return err;
-                        }
-                        xVSS_context->analyseStep =
-                            M4xVSS_kMicroStateConvertPto3GPP;
-                    }
-                }
-                else if( xVSS_context->analyseStep
-                    == M4xVSS_kMicroStateConvertPto3GPP ) /* Pto3GPP, converting */
-                {
-                    err = M4PTO3GPP_Step(xVSS_context->pM4PTO3GPP_Ctxt);
-                    /* update progress bar */
-                    if(xVSS_context->pCallBackCtxt->m_NbImage > 1)
-                    {
-                        uiProgress = (xVSS_context->pCallBackCtxt->m_ImageCounter * 100) / (xVSS_context->pCallBackCtxt->m_NbImage -1);
-                    }
-
-                    if( ( err != M4NO_ERROR) && (err
-                        != ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING)) )
-                    {
-                        /* TO BE CHECKED NO LEAKS  !!!!! */
-                        M4OSA_TRACE1_1(
-                            "M4xVSS_Step: M4PTO3GPP_Step returned 0x%x\n", err);
-                        /* TODO ? : Translate error code of VSS to an xVSS error code */
-                        return err;
-                    }
-                    else if( err
-                        == ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING) )
-                    {
-                        xVSS_context->currentStep++;
-                        /* P4ME00003276: When a step is complete, increment currentStep and reset
-                         uiProgress unless progress would be wrong */
-                        uiProgress = 0;
-                        xVSS_context->analyseStep =
-                            M4xVSS_kMicroStateAnalysePto3GPP; /* We go back to analyze parameters
-                            to see if there is a next file to convert */
-                        /* RC !!!!!!!! */
-                        xVSS_context->pPTo3GPPcurrentParams->isCreated =
-                            M4OSA_TRUE; /* To avoid reconverting it if another SendCommand is
-                            called */
-                        err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
-                        /*SS:blrnxpsw#  234 */
-                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
-                        {
-                            err = M4xVSSERR_NO_MORE_SPACE;
-                        }
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1("M4xVSS_Step:\
-                                           M4xVSS_internalStopConvertPictureTo3gp returned 0x%x",
-                                            err);
-                            /* TODO ? : Translate error code of VSS to an xVSS error code */
-                            return err;
-                        }
-                    }
-                }
-                else if( xVSS_context->analyseStep
-                    ==
-                    M4xVSS_kMicroStateAnalyzeMCS ) /* MCS: analyzing input parameters */
-                {
-                    if( xVSS_context->pMCScurrentParams == M4OSA_NULL \
-                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
-                    {
-                        xVSS_context->pMCScurrentParams = xVSS_context->
-                            pMCSparamsList; /* Current MCS Parameter is the first
-                                            element of the list */
-                    }
-                    else if( xVSS_context->pMCScurrentParams != M4OSA_NULL \
-                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
-                    {
-                        xVSS_context->pMCScurrentParams =
-                            xVSS_context->pMCScurrentParams->
-                            pNext; /* Current MCS Parameter
-                                   is the next element of the list */
-
-                        if( xVSS_context->pMCScurrentParams == M4OSA_NULL )
-                            /* It means there is no next image to convert */
-                        {
-                            xVSS_context->analyseStep =
-                                M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
-                            xVSS_context->m_state =
-                                M4xVSS_kStateOpened; /* Change xVSS state */
-                            err = M4VSS3GPP_WAR_ANALYZING_DONE;
-                            goto end_step; /* End of Analysis */
-                        }
-                    }
-                    else if( xVSS_context->pMCSparamsList == M4OSA_NULL )
-                    {
-                        xVSS_context->analyseStep =
-                            M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
-                        xVSS_context->m_state =
-                            M4xVSS_kStateOpened; /* Change xVSS state */
-                        err = M4VSS3GPP_WAR_ANALYZING_DONE;
-                        goto end_step;                        /* End of Analysis */
-                    }
-
-                    /* Check if this file has to be transcoded or not */
-                    /* If not, we just return M4NO_ERROR, and go to next file */
-                    if( xVSS_context->pMCScurrentParams->isCreated == M4OSA_FALSE )
-                    {
-                        /* Opening MCS */
-                        M4OSA_UInt32 rotationDegree = 0;
-                        err = M4xVSS_internalStartTranscoding(xVSS_context, &rotationDegree);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartTranscoding returned\
-                                 error: 0x%x", err);
-                            return err;
-                        }
-                        int32_t index = xVSS_context->pMCScurrentParams->videoclipnumber;
-
-                        /* The cuts are done in the MCS, so we need to replace
-                           the beginCutTime and endCutTime to keep the entire video*/
-                        xVSS_context->pSettings->pClipList[index]->uiBeginCutTime = 0;
-                        xVSS_context->pSettings->pClipList[index]->uiEndCutTime = 0;
-
-
-                        M4OSA_TRACE1_1("M4xVSS_Step: \
-                            M4xVSS_internalStartTranscoding returned \
-                                success; MCS context: 0x%x",
-                                 xVSS_context->pMCS_Ctxt);
-                        xVSS_context->analyseStep =
-                            M4xVSS_kMicroStateTranscodeMCS;
-
-                        // Retain rotation info of trimmed / transcoded file
-                        xVSS_context->pSettings->pClipList[index]->\
-                            ClipProperties.videoRotationDegrees = rotationDegree;
-                    }
-                }
-                else if( xVSS_context->analyseStep
-                    == M4xVSS_kMicroStateTranscodeMCS )
-                    /* MCS: transcoding file */
-                {
-                    err = M4MCS_step(xVSS_context->pMCS_Ctxt, &uiProgress);
-                    /*SS:blrnxpsw#  234 */
-                    if( err == ((M4OSA_UInt32)M4MCS_ERR_NOMORE_SPACE) )
-                    {
-                        err = M4xVSSERR_NO_MORE_SPACE;
-                    }
-
-                    if( ( err != M4NO_ERROR)
-                        && (err != M4MCS_WAR_TRANSCODING_DONE) )
-                    {
-                        /* TO BE CHECKED NO LEAKS  !!!!! */
-                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_step returned 0x%x\n",
-                            err);
-                        /* TODO ? : Translate error code of MCS to an xVSS error code ? */
-                        return err;
-                    }
-                    else if( err == M4MCS_WAR_TRANSCODING_DONE )
-                    {
-                        xVSS_context->currentStep++;
-                        /* P4ME00003276: When a step is complete, increment currentStep and reset
-                        uiProgress unless progress would be wrong */
-                        uiProgress = 0;
-                        xVSS_context->analyseStep =
-                            M4xVSS_kMicroStateAnalyzeMCS; /* We go back to
-                                                          analyze parameters to see if there is
-                                                           a next file to transcode */
-                        /* RC !!!!!!!!!*/
-                        xVSS_context->pMCScurrentParams->isCreated =
-                            M4OSA_TRUE; /* To avoid
-                                        reconverting it if another SendCommand is called */
-                        err = M4xVSS_internalStopTranscoding(xVSS_context);
-
-                        if( err != M4NO_ERROR )
-                        {
-                            M4OSA_TRACE1_1("M4xVSS_Step:\
-                                           M4xVSS_internalStopTranscoding returned 0x%x", err);
-                            /* TODO ? : Translate error code of MCS to an xVSS error code ? */
-                            return err;
-                        }
-                    }
-                }
-                else
-                {
-                    M4OSA_TRACE1_0("Bad micro state in analyzing state")
-                        return M4ERR_STATE;
-                }
-            }
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "Bad state when calling M4xVSS_Step function! State is %d",
-                xVSS_context->m_state);
-            return M4ERR_STATE;
-    }
-
-end_step:
-    /* Compute progression */
-    if( xVSS_context->nbStepTotal != 0 )
-    {
-        *pProgress = (M4OSA_UInt8)(( ( xVSS_context->currentStep * 100) \
-            / (xVSS_context->nbStepTotal))
-            + (uiProgress / (xVSS_context->nbStepTotal)));
-
-        if( *pProgress > 100 )
-        {
-            *pProgress = 100;
-        }
-    }
-    else
-    {
-        *pProgress = 100;
-    }
-
-    return err;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext)
- * @brief        This function deletes current editing profile, unallocate
- *                ressources and change xVSS internal state.
- * @note        After this function, the user can call a new M4xVSS_SendCommand
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_CloseCommand( M4OSA_Context pContext )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check state */
-    /* Depending of the state, differents things have to be done */
-    switch( xVSS_context->m_state )
-    {
-        case M4xVSS_kStateOpened:
-            /* Nothing to do here */
-            err = M4xVSS_internalFreeSaving(xVSS_context);
-            break;
-
-        case M4xVSS_kStateSaving:
-            {
-                if( xVSS_context->editingStep == M4xVSS_kMicroStateEditing )
-                {
-                    err = M4xVSS_internalCloseEditedFile(xVSS_context);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        /* Fix for blrnxpsw#234---->*/
-                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
-                        {
-                            err = M4xVSSERR_NO_MORE_SPACE;
-                        }
-                        M4OSA_TRACE1_1("M4xVSS_CloseCommand:\
-                                       M4xVSS_internalCloseEditedFile returned an error: 0x%x",
-                                        err);
-                        /* we are retaining error here and returning error  in the end of the
-                        function  as to aviod memory leak*/
-                        //return err;
-                    }
-                }
-                else if( xVSS_context->editingStep
-                    == M4xVSS_kMicroStateAudioMixing )
-                {
-                    err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
-
-                    if( err != M4NO_ERROR )
-                    {
-                        /* Fix for blrnxpsw#234---->*/
-                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
-                        {
-                            err = M4xVSSERR_NO_MORE_SPACE;
-                        }
-                        M4OSA_TRACE1_1("M4xVSS_CloseCommand: \
-                                M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x", err);
-                        /* we are retaining error here and returning error  in the end of
-                        the function  as to aviod memory leak*/
-                        //return err;
-                        /* <----Fix for blrnxpsw#234*/
-                    }
-                }
-                err = M4xVSS_internalFreeSaving(xVSS_context);
-                /* We free this pointer only if a BGM track is present, because in that case,
-                this pointer owns to us */
-                if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL ) {
-                    /*if(M4OSA_NULL != xVSS_context->pSettings->pOutputFile)
-                    {
-                    free(xVSS_context->pSettings->pOutputFile);
-                    xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
-                    }*/
-                    /*if(M4OSA_NULL != xVSS_context->pSettings->pTemporaryFile)
-                    {
-                    free(xVSS_context->pSettings->pTemporaryFile);
-                    xVSS_context->pSettings->pTemporaryFile = M4OSA_NULL;
-                    }*/
-                }
-            }
-            break;
-
-        case M4xVSS_kStateSaved:
-            break;
-
-        case M4xVSS_kStateAnalyzing:
-            {
-                if( xVSS_context->analyseStep == M4xVSS_kMicroStateConvertPto3GPP )
-                {
-                    /* Free Pto3GPP module */
-                    err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
-                    /* Fix for blrnxpsw#234---->*/
-                    if( err != M4NO_ERROR )
-                    {
-                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
-                        {
-                            err = M4xVSSERR_NO_MORE_SPACE;
-                        }
-                        M4OSA_TRACE1_1("M4xVSS_Step: \
-                                       M4xVSS_internalStopConvertPictureTo3gp returned 0x%x", err);
-                        /* we are retaining error here and returning error  in the end of the
-                        function  as to aviod memory leak*/
-                        //return err;
-                    }
-                    /* <-----Fix for blrnxpsw#234>*/
-                }
-                else if( xVSS_context->analyseStep
-                    == M4xVSS_kMicroStateTranscodeMCS )
-                {
-                    /* Free MCS module */
-                    err = M4MCS_abort(xVSS_context->pMCS_Ctxt);
-                    /* Fix for blrnxpsw#234---->*/
-                    if( err != M4NO_ERROR )
-                    {
-                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
-                        {
-                            err = M4xVSSERR_NO_MORE_SPACE;
-                        }
-                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_abort returned 0x%x",
-                            err);
-                        /* we are retaining error here and returning error  in the end of the
-                        function  as to aviod memory leak*/
-                        //return err;
-                    }
-                    /* <---Fix for blrnxpsw#234*/
-                }
-            }
-            break;
-
-        default:
-            M4OSA_TRACE1_1(
-                "Bad state when calling M4xVSS_CloseCommand function! State is %d",
-                xVSS_context->m_state);
-            return M4ERR_STATE;
-    }
-
-    /* Free Send command */
-    M4xVSS_freeCommand(xVSS_context);
-
-    xVSS_context->m_state = M4xVSS_kStateInitialized; /* Change xVSS state */
-
-    return err;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext)
- * @brief        This function deletes all xVSS ressources
- * @note        This function must be called after M4xVSS_CloseCommand.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_CleanUp( M4OSA_Context pContext )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_TRACE3_0("M4xVSS_CleanUp:entering");
-
-    /* Check state */
-    if( xVSS_context->m_state != M4xVSS_kStateInitialized )
-    {
-        M4OSA_TRACE1_1(\
-            "Bad state when calling M4xVSS_CleanUp function! State is %d",\
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    /**
-    * UTF conversion: free temporary buffer*/
-    if( xVSS_context->UTFConversionContext.pTempOutConversionBuffer
-        != M4OSA_NULL )
-    {
-        free(xVSS_context->
-            UTFConversionContext.pTempOutConversionBuffer);
-        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
-            M4OSA_NULL;
-    }
-
-    free(xVSS_context->pTempPath);
-    xVSS_context->pTempPath = M4OSA_NULL;
-
-    free(xVSS_context->pSettings);
-    xVSS_context->pSettings = M4OSA_NULL;
-
-    free(xVSS_context);
-    xVSS_context = M4OSA_NULL;
-    M4OSA_TRACE3_0("M4xVSS_CleanUp:leaving ");
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_GetVersion(M4_VersionInfo *pVersion)
- * @brief        This function get the version of the Video Studio 2.1
- *
- * @param    pVersion            (IN) Pointer on the version info struct
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_GetVersion( M4_VersionInfo *pVersion )
-{
-    /* Just used for a grep in code */
-    /* CHANGE_VERSION_HERE */
-    static const M4OSA_Char cVersion[26] = "NXPSW_VideoStudio21_1_3_0";
-
-    if( M4OSA_NULL == pVersion )
-    {
-        return M4ERR_PARAMETER;
-    }
-
-    pVersion->m_major = M4_xVSS_MAJOR;
-    pVersion->m_minor = M4_xVSS_MINOR;
-    pVersion->m_revision = M4_xVSS_REVISION;
-    pVersion->m_structSize = sizeof(M4_VersionInfo);
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_CreateClipSettings()
- * @brief    Allows filling a clip settings structure with default values
- *
- * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
- *                   pClipSettings->pFile      will be allocated in this function.
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   pFile               (IN) Clip file name
- * @param   filePathSize        (IN) Size of the clip path (needed for the UTF16 conversion)
- * @param    nbEffects           (IN) Nb of effect settings to allocate
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_CreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
-                                    M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
-                                     M4OSA_UInt8 nbEffects )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4OSA_TRACE3_1("M4xVSS_CreateClipSettings called with pClipSettings=0x%p",
-        pClipSettings);
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
-        "M4xVSS_CreateClipSettings: pClipSettings is NULL");
-
-    /* Create inherited VSS3GPP stuff */
-    /*err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile,nbEffects);*/
-    /*FB: add clip path size (needed for UTF 16 conversion)*/
-    err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile, filePathSize,
-        nbEffects);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1("M4xVSS_CreateClipSettings :\
-                       ERROR in M4VSS3GPP_editCreateClipSettings = 0x%x", err);
-        return err;
-    }
-
-    /* Set the clip settings to default */
-    pClipSettings->xVSS.uiBeginCutPercent = 0;
-    pClipSettings->xVSS.uiEndCutPercent = 0;
-    pClipSettings->xVSS.uiDuration = 0;
-    pClipSettings->xVSS.isPanZoom = M4OSA_FALSE;
-    pClipSettings->xVSS.PanZoomTopleftXa = 0;
-    pClipSettings->xVSS.PanZoomTopleftYa = 0;
-    pClipSettings->xVSS.PanZoomTopleftXb = 0;
-    pClipSettings->xVSS.PanZoomTopleftYb = 0;
-    pClipSettings->xVSS.PanZoomXa = 0;
-    pClipSettings->xVSS.PanZoomXb = 0;
-
-    /**
-    * Return with no error */
-    M4OSA_TRACE3_0("M4xVSS_CreateClipSettings(): returning M4NO_ERROR");
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_DuplicateClipSettings()
- * @brief    Duplicates a clip settings structure, performing allocations if required
- *
- * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_DuplicateClipSettings( M4VSS3GPP_ClipSettings
-                                       *pClipSettingsDest,
-                                       M4VSS3GPP_ClipSettings *pClipSettingsOrig,
-                                        M4OSA_Bool bCopyEffects )
-{
-    M4OSA_ERR err = M4NO_ERROR;
-
-    M4OSA_TRACE3_2(
-        "M4xVSS_DuplicateClipSettings called with dest=0x%p src=0x%p",
-        pClipSettingsDest, pClipSettingsOrig);
-
-    /* Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
-        "M4xVSS_DuplicateClipSettings: pClipSettingsDest is NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
-        "M4xVSS_DuplicateClipSettings: pClipSettingsOrig is NULL");
-
-    /* Call inherited VSS3GPP duplication */
-    err = M4VSS3GPP_editDuplicateClipSettings(pClipSettingsDest,
-        pClipSettingsOrig, bCopyEffects);
-
-    if( M4NO_ERROR != err )
-    {
-        M4OSA_TRACE1_1("M4xVSS_CreateClipSettings :\
-                       ERROR in M4VSS3GPP_editDuplicateClipSettings = 0x%x", err);
-        return err;
-    }
-
-    /* Return with no error */
-    M4OSA_TRACE3_0("M4xVSS_DuplicateClipSettings(): returning M4NO_ERROR");
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_FreeClipSettings()
- * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects, ...).
- *
- * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_FreeClipSettings( M4VSS3GPP_ClipSettings *pClipSettings )
-{
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
-        "M4xVSS_FreeClipSettings: pClipSettings is NULL");
-
-    /* Free inherited VSS3GPP stuff */
-    M4VSS3GPP_editFreeClipSettings(pClipSettings);
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
- * @brief        This function returns the MCS context within the xVSS internal context
- * @note        This function must be called only after VSS state has moved to analyzing state or
- * beyond
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    mcsContext        (OUT) Pointer to pointer of mcs context to return
- * @return    M4NO_ERROR:        No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_getMCSContext( M4OSA_Context pContext,
-                               M4OSA_Context *mcsContext )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4xVSS_getMCSContext: pContext is NULL");
-
-    if( xVSS_context->m_state == M4xVSS_kStateInitialized )
-    {
-        M4OSA_TRACE1_1("M4xVSS_getMCSContext: Bad state! State is %d",\
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    *mcsContext = xVSS_context->pMCS_Ctxt;
-
-    return err;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
- *                                                   M4OSA_Context* mcsContext)
- * @brief        This function returns the VSS3GPP context within the xVSS internal context
- * @note        This function must be called only after VSS state has moved to Generating preview
- *              or beyond
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @param    vss3gppContext        (OUT) Pointer to pointer of vss3gpp context to return
- * @return    M4NO_ERROR:        No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_STATE:        This function cannot not be called at this time
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_getVSS3GPPContext( M4OSA_Context pContext,
-                                   M4OSA_Context *vss3gppContext )
-{
-    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /**
-    *    Check input parameter */
-    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
-        "M4xVSS_getVSS3GPPContext: pContext is NULL");
-
-    if( xVSS_context->m_state < M4xVSS_kStateSaving )
-    {
-        M4OSA_TRACE1_1("M4xVSS_getVSS3GPPContext: Bad state! State is %d",\
-            xVSS_context->m_state);
-        return M4ERR_STATE;
-    }
-
-    *vss3gppContext = xVSS_context->pCurrentEditContext;
-
-    return err;
-}
-
-M4OSA_ERR M4xVSS_getVideoDecoderCapabilities(M4DECODER_VideoDecoders **decoders) {
-    M4OSA_ERR err = M4NO_ERROR;
-
-    // Call the decoder api directly
-    // to get all the video decoder capablities.
-    err = VideoEditorVideoDecoder_getVideoDecodersAndCapabilities(decoders);
-    return err;
-}
diff --git a/libvideoeditor/vss/src/M4xVSS_internal.c b/libvideoeditor/vss/src/M4xVSS_internal.c
deleted file mode 100755
index 84959ec..0000000
--- a/libvideoeditor/vss/src/M4xVSS_internal.c
+++ /dev/null
@@ -1,4889 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file    M4xVSS_internal.c
- * @brief    Internal functions of extended Video Studio Service (Video Studio 2.1)
- * @note
- ******************************************************************************
- */
-#include "M4OSA_Debug.h"
-#include "M4OSA_CharStar.h"
-
-#include "NXPSW_CompilerSwitches.h"
-
-#include "M4VSS3GPP_API.h"
-#include "M4VSS3GPP_ErrorCodes.h"
-
-#include "M4xVSS_API.h"
-#include "M4xVSS_Internal.h"
-
-/*for rgb16 color effect*/
-#include "M4VIFI_Defines.h"
-#include "M4VIFI_Clip.h"
-
-/**
- * component includes */
-#include "M4VFL_transition.h"            /**< video effects */
-
-/* Internal header file of VSS is included because of MMS use case */
-#include "M4VSS3GPP_InternalTypes.h"
-
-/*Exif header files to add image rendering support (cropping, black borders)*/
-#include "M4EXIFC_CommonAPI.h"
-// StageFright encoders require %16 resolution
-#include "M4ENCODER_common.h"
-
-#define TRANSPARENT_COLOR 0x7E0
-
-/* Prototype of M4VIFI_xVSS_RGB565toYUV420 function (avoid green effect of transparency color) */
-M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-                                        M4VIFI_ImagePlane *pPlaneOut);
-
-
-/*special MCS function used only in VideoArtist and VideoStudio to open the media in the normal
- mode. That way the media duration is accurate*/
-extern M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
-                                         M4VIDEOEDITING_FileType InputFileType,
-                                         M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
-
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext)
- * @brief        This function initializes MCS (3GP transcoder) with the given
- *                parameters
- * @note        The transcoding parameters are given by the internal xVSS context.
- *                This context contains a pointer on the current element of the
- *                chained list of MCS parameters.
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext,
-                                          M4OSA_UInt32 *rotationDegree)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-    M4MCS_Context mcs_context;
-    M4MCS_OutputParams Params;
-    M4MCS_EncodingParams Rates;
-    M4OSA_UInt32 i;
-    M4VIDEOEDITING_ClipProperties clipProps;
-
-    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("Error in M4MCS_init: 0x%x", err);
-        return err;
-    }
-
-    err = M4MCS_open(mcs_context, xVSS_context->pMCScurrentParams->pFileIn,
-         xVSS_context->pMCScurrentParams->InputFileType,
-             xVSS_context->pMCScurrentParams->pFileOut,
-             xVSS_context->pMCScurrentParams->pFileTemp);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("Error in M4MCS_open: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-
-    /** Get the clip properties
-     */
-    err = M4MCS_getInputFileProperties(mcs_context, &clipProps);
-    if (err != M4NO_ERROR) {
-        M4OSA_TRACE1_1("Error in M4MCS_getInputFileProperties: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-    *rotationDegree = clipProps.videoRotationDegrees;
-
-    /**
-     * Fill MCS parameters with the parameters contained in the current element of the
-       MCS parameters chained list */
-    Params.OutputFileType = xVSS_context->pMCScurrentParams->OutputFileType;
-    Params.OutputVideoFormat = xVSS_context->pMCScurrentParams->OutputVideoFormat;
-    Params.outputVideoProfile= xVSS_context->pMCScurrentParams->outputVideoProfile;
-    Params.outputVideoLevel = xVSS_context->pMCScurrentParams->outputVideoLevel;
-    Params.OutputVideoFrameSize = xVSS_context->pMCScurrentParams->OutputVideoFrameSize;
-    Params.OutputVideoFrameRate = xVSS_context->pMCScurrentParams->OutputVideoFrameRate;
-    Params.OutputAudioFormat = xVSS_context->pMCScurrentParams->OutputAudioFormat;
-    Params.OutputAudioSamplingFrequency =
-         xVSS_context->pMCScurrentParams->OutputAudioSamplingFrequency;
-    Params.bAudioMono = xVSS_context->pMCScurrentParams->bAudioMono;
-    Params.pOutputPCMfile = M4OSA_NULL;
-    /*FB 2008/10/20: add media rendering parameter to keep aspect ratio*/
-    switch(xVSS_context->pMCScurrentParams->MediaRendering)
-    {
-    case M4xVSS_kResizing:
-        Params.MediaRendering = M4MCS_kResizing;
-        break;
-    case M4xVSS_kCropping:
-        Params.MediaRendering = M4MCS_kCropping;
-        break;
-    case M4xVSS_kBlackBorders:
-        Params.MediaRendering = M4MCS_kBlackBorders;
-        break;
-    default:
-        break;
-    }
-    /**/
-    // new params after integrating MCS 2.0
-    // Set the number of audio effects; 0 for now.
-    Params.nbEffects = 0;
-
-    // Set the audio effect; null for now.
-    Params.pEffects = NULL;
-
-    // Set the audio effect; null for now.
-    Params.bDiscardExif = M4OSA_FALSE;
-
-    // Set the audio effect; null for now.
-    Params.bAdjustOrientation = M4OSA_FALSE;
-    // new params after integrating MCS 2.0
-
-    /**
-     * Set output parameters */
-    err = M4MCS_setOutputParams(mcs_context, &Params);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("Error in M4MCS_setOutputParams: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-
-    Rates.OutputVideoBitrate = xVSS_context->pMCScurrentParams->OutputVideoBitrate;
-    Rates.OutputAudioBitrate = xVSS_context->pMCScurrentParams->OutputAudioBitrate;
-    Rates.BeginCutTime = 0;
-    Rates.EndCutTime = 0;
-    Rates.OutputFileSize = 0;
-
-    /*FB: transcoding per parts*/
-    Rates.BeginCutTime = xVSS_context->pMCScurrentParams->BeginCutTime;
-    Rates.EndCutTime = xVSS_context->pMCScurrentParams->EndCutTime;
-    Rates.OutputVideoTimescale = xVSS_context->pMCScurrentParams->OutputVideoTimescale;
-
-    err = M4MCS_setEncodingParams(mcs_context, &Rates);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("Error in M4MCS_setEncodingParams: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-
-    err = M4MCS_checkParamsAndStart(mcs_context);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("Error in M4MCS_checkParamsAndStart: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-
-    /**
-     * Save MCS context to be able to call MCS step function in M4xVSS_step function */
-    xVSS_context->pMCS_Ctxt = mcs_context;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
- * @brief        This function cleans up MCS (3GP transcoder)
- * @note
- *
- * @param    pContext            (IN) Pointer on the xVSS edit context
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
- * @return    M4ERR_ALLOC:        Memory allocation has failed
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-
-    err = M4MCS_close(xVSS_context->pMCS_Ctxt);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_close: 0x%x", err);
-        M4MCS_abort(xVSS_context->pMCS_Ctxt);
-        return err;
-    }
-
-    /**
-     * Free this MCS instance */
-    err = M4MCS_cleanUp(xVSS_context->pMCS_Ctxt);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_cleanUp: 0x%x", err);
-        return err;
-    }
-
-    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
- *                                             M4OSA_FileReadPointer* pFileReadPtr,
- *                                                M4VIFI_ImagePlane* pImagePlanes,
- *                                                 M4OSA_UInt32 width,
- *                                                M4OSA_UInt32 height);
- * @brief    It Coverts and resizes a ARGB8888 image to YUV420
- * @note
- * @param    pFileIn            (IN) The Image input file
- * @param    pFileReadPtr    (IN) Pointer on filesystem functions
- * @param    pImagePlanes    (IN/OUT) Pointer on YUV420 output planes allocated by the user
- *                            ARGB8888 image  will be converted and resized  to output
- *                             YUV420 plane size
- *@param    width        (IN) width of the ARGB8888
- *@param    height            (IN) height of the ARGB8888
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_ALLOC: memory error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-
-M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
-                                                          M4OSA_FileReadPointer* pFileReadPtr,
-                                                          M4VIFI_ImagePlane* pImagePlanes,
-                                                          M4OSA_UInt32 width,M4OSA_UInt32 height)
-{
-    M4OSA_Context pARGBIn;
-    M4VIFI_ImagePlane rgbPlane1 ,rgbPlane2;
-    M4OSA_UInt32 frameSize_argb=(width * height * 4);
-    M4OSA_UInt32 frameSize = (width * height * 3); //Size of RGB888 data.
-    M4OSA_UInt32 i = 0,j= 0;
-    M4OSA_ERR err=M4NO_ERROR;
-
-
-    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb,
-         M4VS, (M4OSA_Char*)"Image argb data");
-        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Entering :");
-    if(pTmpData == M4OSA_NULL) {
-        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
-            Failed to allocate memory for Image clip");
-        return M4ERR_ALLOC;
-    }
-
-    M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :width and height %d %d",
-        width ,height);
-    /* Get file size (mandatory for chunk decoding) */
-    err = pFileReadPtr->openRead(&pARGBIn, pFileIn, M4OSA_kFileRead);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
-            Can't open input ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
-        free(pTmpData);
-        pTmpData = M4OSA_NULL;
-        goto cleanup;
-    }
-
-    err = pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't close ARGB8888\
-             file %s, error: 0x%x\n",pFileIn, err);
-        pFileReadPtr->closeRead(pARGBIn);
-        free(pTmpData);
-        pTmpData = M4OSA_NULL;
-        goto cleanup;
-    }
-
-    err = pFileReadPtr->closeRead(pARGBIn);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't close ARGB8888 \
-             file %s, error: 0x%x\n",pFileIn, err);
-        free(pTmpData);
-        pTmpData = M4OSA_NULL;
-        goto cleanup;
-    }
-
-    rgbPlane1.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS,
-         (M4OSA_Char*)"Image clip RGB888 data");
-    if(rgbPlane1.pac_data == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 \
-            Failed to allocate memory for Image clip");
-        free(pTmpData);
-        return M4ERR_ALLOC;
-    }
-
-        rgbPlane1.u_height = height;
-        rgbPlane1.u_width = width;
-        rgbPlane1.u_stride = width*3;
-        rgbPlane1.u_topleft = 0;
-
-
-    /** Remove the alpha channel */
-    for (i=0, j = 0; i < frameSize_argb; i++) {
-        if ((i % 4) == 0) continue;
-        rgbPlane1.pac_data[j] = pTmpData[i];
-        j++;
-    }
-        free(pTmpData);
-
-    /* To Check if resizing is required with color conversion */
-    if(width != pImagePlanes->u_width || height != pImagePlanes->u_height)
-    {
-        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Resizing :");
-        frameSize =  ( pImagePlanes->u_width * pImagePlanes->u_height * 3);
-        rgbPlane2.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS,
-             (M4OSA_Char*)"Image clip RGB888 data");
-        if(rgbPlane2.pac_data == M4OSA_NULL)
-        {
-            M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
-            free(pTmpData);
-            return M4ERR_ALLOC;
-        }
-            rgbPlane2.u_height =  pImagePlanes->u_height;
-            rgbPlane2.u_width = pImagePlanes->u_width;
-            rgbPlane2.u_stride = pImagePlanes->u_width*3;
-            rgbPlane2.u_topleft = 0;
-
-        /* Resizing RGB888 to RGB888 */
-        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane1, &rgbPlane2);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("error when converting from Resize RGB888 to RGB888: 0x%x\n", err);
-            free(rgbPlane2.pac_data);
-            free(rgbPlane1.pac_data);
-            return err;
-        }
-        /*Converting Resized RGB888 to YUV420 */
-        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane2, pImagePlanes);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("error when converting from RGB888 to YUV: 0x%x\n", err);
-            free(rgbPlane2.pac_data);
-            free(rgbPlane1.pac_data);
-            return err;
-        }
-            free(rgbPlane2.pac_data);
-            free(rgbPlane1.pac_data);
-
-            M4OSA_TRACE1_0("RGB to YUV done");
-
-
-    }
-    else
-    {
-        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 NO  Resizing :");
-        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane1, pImagePlanes);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("error when converting from RGB to YUV: 0x%x\n", err);
-        }
-            free(rgbPlane1.pac_data);
-
-            M4OSA_TRACE1_0("RGB to YUV done");
-    }
-cleanup:
-    M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 leaving :");
-    return err;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
- *                                             M4OSA_FileReadPointer* pFileReadPtr,
- *                                                M4VIFI_ImagePlane* pImagePlanes,
- *                                                 M4OSA_UInt32 width,
- *                                                M4OSA_UInt32 height);
- * @brief    It Coverts a ARGB8888 image to YUV420
- * @note
- * @param    pFileIn            (IN) The Image input file
- * @param    pFileReadPtr    (IN) Pointer on filesystem functions
- * @param    pImagePlanes    (IN/OUT) Pointer on YUV420 output planes allocated by the user
- *                            ARGB8888 image  will be converted and resized  to output
- *                            YUV420 plane size
- * @param    width        (IN) width of the ARGB8888
- * @param    height            (IN) height of the ARGB8888
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_ALLOC: memory error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-
-M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
-                                                 M4OSA_FileReadPointer* pFileReadPtr,
-                                                 M4VIFI_ImagePlane** pImagePlanes,
-                                                 M4OSA_UInt32 width,M4OSA_UInt32 height)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4VIFI_ImagePlane *yuvPlane = M4OSA_NULL;
-
-    yuvPlane = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane),
-                M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
-    if(yuvPlane == M4OSA_NULL) {
-        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
-            Failed to allocate memory for Image clip");
-        return M4ERR_ALLOC;
-    }
-    yuvPlane[0].u_height = height;
-    yuvPlane[0].u_width = width;
-    yuvPlane[0].u_stride = width;
-    yuvPlane[0].u_topleft = 0;
-    yuvPlane[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(yuvPlane[0].u_height \
-        * yuvPlane[0].u_width * 1.5, M4VS, (M4OSA_Char*)"imageClip YUV data");
-
-    yuvPlane[1].u_height = yuvPlane[0].u_height >>1;
-    yuvPlane[1].u_width = yuvPlane[0].u_width >> 1;
-    yuvPlane[1].u_stride = yuvPlane[1].u_width;
-    yuvPlane[1].u_topleft = 0;
-    yuvPlane[1].pac_data = (M4VIFI_UInt8*)(yuvPlane[0].pac_data + yuvPlane[0].u_height \
-        * yuvPlane[0].u_width);
-
-    yuvPlane[2].u_height = yuvPlane[0].u_height >>1;
-    yuvPlane[2].u_width = yuvPlane[0].u_width >> 1;
-    yuvPlane[2].u_stride = yuvPlane[2].u_width;
-    yuvPlane[2].u_topleft = 0;
-    yuvPlane[2].pac_data = (M4VIFI_UInt8*)(yuvPlane[1].pac_data + yuvPlane[1].u_height \
-        * yuvPlane[1].u_width);
-    err = M4xVSS_internalConvertAndResizeARGB8888toYUV420( pFileIn,pFileReadPtr,
-                                                          yuvPlane, width, height);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalConvertAndResizeARGB8888toYUV420 return error: 0x%x\n", err);
-        free(yuvPlane);
-        return err;
-    }
-
-        *pImagePlanes = yuvPlane;
-
-    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :Leaving");
-    return err;
-
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_PictureCallbackFct (M4OSA_Void* pPictureCtxt,
- *                                        M4VIFI_ImagePlane* pImagePlanes,
- *                                        M4OSA_UInt32* pPictureDuration);
- * @brief    It feeds the PTO3GPP with YUV420 pictures.
- * @note    This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
- * @param    pContext    (IN) The integrator own context
- * @param    pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
- * @param    pPictureDuration(OUT) Duration of the returned picture
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_PictureCallbackFct(M4OSA_Void* pPictureCtxt, M4VIFI_ImagePlane* pImagePlanes,
-                                     M4OSA_Double* pPictureDuration)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt8    last_frame_flag = 0;
-    M4xVSS_PictureCallbackCtxt* pC = (M4xVSS_PictureCallbackCtxt*) (pPictureCtxt);
-
-    /*Used for pan&zoom*/
-    M4OSA_UInt8 tempPanzoomXa = 0;
-    M4OSA_UInt8 tempPanzoomXb = 0;
-    M4AIR_Params Params;
-    /**/
-
-    /*Used for cropping and black borders*/
-    M4OSA_Context    pPictureContext = M4OSA_NULL;
-    M4OSA_FilePosition    pictureSize = 0 ;
-    M4OSA_UInt8*    pictureBuffer = M4OSA_NULL;
-    //M4EXIFC_Context pExifContext = M4OSA_NULL;
-    M4EXIFC_BasicTags pBasicTags;
-    M4VIFI_ImagePlane pImagePlanes1 = pImagePlanes[0];
-    M4VIFI_ImagePlane pImagePlanes2 = pImagePlanes[1];
-    M4VIFI_ImagePlane pImagePlanes3 = pImagePlanes[2];
-    /**/
-
-    /**
-     * Check input parameters */
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureCtxt),        M4ERR_PARAMETER,
-         "M4xVSS_PictureCallbackFct: pPictureCtxt is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pImagePlanes),        M4ERR_PARAMETER,
-         "M4xVSS_PictureCallbackFct: pImagePlanes is M4OSA_NULL");
-    M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureDuration), M4ERR_PARAMETER,
-         "M4xVSS_PictureCallbackFct: pPictureDuration is M4OSA_NULL");
-    M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct :Entering");
-    /*PR P4ME00003181 In case the image number is 0, pan&zoom can not be used*/
-    if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom && pC->m_NbImage == 0)
-    {
-        pC->m_pPto3GPPparams->isPanZoom = M4OSA_FALSE;
-    }
-
-    /*If no cropping/black borders or pan&zoom, just decode and resize the picture*/
-    if(pC->m_mediaRendering == M4xVSS_kResizing && M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
-    {
-        /**
-         * Convert and resize input ARGB8888 file to YUV420 */
-        /*To support ARGB8888 : */
-        M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 1: width and heght %d %d",
-            pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
-        err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(pC->m_FileIn,
-             pC->m_pFileReadPtr, pImagePlanes,pC->m_pPto3GPPparams->width,
-                pC->m_pPto3GPPparams->height);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
-            return err;
-        }
-    }
-    /*In case of cropping, black borders or pan&zoom, call the EXIF reader and the AIR*/
-    else
-    {
-        /**
-         * Computes ratios */
-        if(pC->m_pDecodedPlane == M4OSA_NULL)
-        {
-            /**
-             * Convert input ARGB8888 file to YUV420 */
-             M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 2: width and heght %d %d",
-                pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
-            err = M4xVSS_internalConvertARGB8888toYUV420(pC->m_FileIn, pC->m_pFileReadPtr,
-                &(pC->m_pDecodedPlane),pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
-            if(err != M4NO_ERROR)
-            {
-                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
-                if(pC->m_pDecodedPlane != M4OSA_NULL)
-                {
-                    /* YUV420 planar is returned but allocation is made only once
-                        (contigous planes in memory) */
-                    if(pC->m_pDecodedPlane->pac_data != M4OSA_NULL)
-                    {
-                        free(pC->m_pDecodedPlane->pac_data);
-                    }
-                    free(pC->m_pDecodedPlane);
-                    pC->m_pDecodedPlane = M4OSA_NULL;
-                }
-                return err;
-            }
-        }
-
-        /*Initialize AIR Params*/
-        Params.m_inputCoord.m_x = 0;
-        Params.m_inputCoord.m_y = 0;
-        Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
-        Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
-        Params.m_outputSize.m_width = pImagePlanes->u_width;
-        Params.m_outputSize.m_height = pImagePlanes->u_height;
-        Params.m_bOutputStripe = M4OSA_FALSE;
-        Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-
-        /*Initialize Exif params structure*/
-        pBasicTags.orientation = M4COMMON_kOrientationUnknown;
-
-        /**
-        Pan&zoom params*/
-        if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom)
-        {
-            /*Save ratio values, they can be reused if the new ratios are 0*/
-            tempPanzoomXa = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXa;
-            tempPanzoomXb = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXb;
-            /*Check that the ratio is not 0*/
-            /*Check (a) parameters*/
-            if(pC->m_pPto3GPPparams->PanZoomXa == 0)
-            {
-                M4OSA_UInt8 maxRatio = 0;
-                if(pC->m_pPto3GPPparams->PanZoomTopleftXa >=
-                     pC->m_pPto3GPPparams->PanZoomTopleftYa)
-                {
-                    /*The ratio is 0, that means the area of the picture defined with (a)
-                    parameters is bigger than the image size*/
-                    if(pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa > 1000)
-                    {
-                        /*The oversize is maxRatio*/
-                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa - 1000;
-                    }
-                }
-                else
-                {
-                    /*The ratio is 0, that means the area of the picture defined with (a)
-                     parameters is bigger than the image size*/
-                    if(pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa > 1000)
-                    {
-                        /*The oversize is maxRatio*/
-                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa - 1000;
-                    }
-                }
-                /*Modify the (a) parameters:*/
-                if(pC->m_pPto3GPPparams->PanZoomTopleftXa >= maxRatio)
-                {
-                    /*The (a) topleft parameters can be moved to keep the same area size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftXa -= maxRatio;
-                }
-                else
-                {
-                    /*Move the (a) topleft parameter to 0 but the ratio will be also further
-                    modified to match the image size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftXa = 0;
-                }
-                if(pC->m_pPto3GPPparams->PanZoomTopleftYa >= maxRatio)
-                {
-                    /*The (a) topleft parameters can be moved to keep the same area size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftYa -= maxRatio;
-                }
-                else
-                {
-                    /*Move the (a) topleft parameter to 0 but the ratio will be also further
-                     modified to match the image size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftYa = 0;
-                }
-                /*The new ratio is the original one*/
-                pC->m_pPto3GPPparams->PanZoomXa = tempPanzoomXa;
-                if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftXa > 1000)
-                {
-                    /*Change the ratio if the area of the picture defined with (a) parameters is
-                    bigger than the image size*/
-                    pC->m_pPto3GPPparams->PanZoomXa = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftXa;
-                }
-                if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftYa > 1000)
-                {
-                    /*Change the ratio if the area of the picture defined with (a) parameters is
-                    bigger than the image size*/
-                    pC->m_pPto3GPPparams->PanZoomXa = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftYa;
-                }
-            }
-            /*Check (b) parameters*/
-            if(pC->m_pPto3GPPparams->PanZoomXb == 0)
-            {
-                M4OSA_UInt8 maxRatio = 0;
-                if(pC->m_pPto3GPPparams->PanZoomTopleftXb >=
-                     pC->m_pPto3GPPparams->PanZoomTopleftYb)
-                {
-                    /*The ratio is 0, that means the area of the picture defined with (b)
-                     parameters is bigger than the image size*/
-                    if(pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb > 1000)
-                    {
-                        /*The oversize is maxRatio*/
-                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb - 1000;
-                    }
-                }
-                else
-                {
-                    /*The ratio is 0, that means the area of the picture defined with (b)
-                     parameters is bigger than the image size*/
-                    if(pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb > 1000)
-                    {
-                        /*The oversize is maxRatio*/
-                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb - 1000;
-                    }
-                }
-                /*Modify the (b) parameters:*/
-                if(pC->m_pPto3GPPparams->PanZoomTopleftXb >= maxRatio)
-                {
-                    /*The (b) topleft parameters can be moved to keep the same area size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftXb -= maxRatio;
-                }
-                else
-                {
-                    /*Move the (b) topleft parameter to 0 but the ratio will be also further
-                     modified to match the image size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftXb = 0;
-                }
-                if(pC->m_pPto3GPPparams->PanZoomTopleftYb >= maxRatio)
-                {
-                    /*The (b) topleft parameters can be moved to keep the same area size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftYb -= maxRatio;
-                }
-                else
-                {
-                    /*Move the (b) topleft parameter to 0 but the ratio will be also further
-                    modified to match the image size*/
-                    pC->m_pPto3GPPparams->PanZoomTopleftYb = 0;
-                }
-                /*The new ratio is the original one*/
-                pC->m_pPto3GPPparams->PanZoomXb = tempPanzoomXb;
-                if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftXb > 1000)
-                {
-                    /*Change the ratio if the area of the picture defined with (b) parameters is
-                    bigger than the image size*/
-                    pC->m_pPto3GPPparams->PanZoomXb = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftXb;
-                }
-                if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftYb > 1000)
-                {
-                    /*Change the ratio if the area of the picture defined with (b) parameters is
-                    bigger than the image size*/
-                    pC->m_pPto3GPPparams->PanZoomXb = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftYb;
-                }
-            }
-
-            /**
-             * Computes AIR parameters */
-/*        Params.m_inputCoord.m_x = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
-            (pC->m_pPto3GPPparams->PanZoomTopleftXa +
-            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftXb \
-                - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
-            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
-        Params.m_inputCoord.m_y = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
-            (pC->m_pPto3GPPparams->PanZoomTopleftYa +
-            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftYb\
-                 - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
-            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
-
-        Params.m_inputSize.m_width = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
-            (pC->m_pPto3GPPparams->PanZoomXa +
-            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
-            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
-
-        Params.m_inputSize.m_height =  (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
-            (pC->m_pPto3GPPparams->PanZoomXa +
-            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
-            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
- */
-            // Instead of using pC->m_NbImage we have to use (pC->m_NbImage-1) as pC->m_ImageCounter
-            // will be x-1 max for x no. of frames
-            Params.m_inputCoord.m_x = (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
-                (pC->m_pPto3GPPparams->PanZoomTopleftXa +
-                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftXb\
-                     - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
-                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
-            Params.m_inputCoord.m_y =
-                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
-                (pC->m_pPto3GPPparams->PanZoomTopleftYa +
-                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftYb\
-                     - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
-                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
-
-            Params.m_inputSize.m_width =
-                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
-                (pC->m_pPto3GPPparams->PanZoomXa +
-                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb\
-                     - pC->m_pPto3GPPparams->PanZoomXa) *
-                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
-
-            Params.m_inputSize.m_height =
-                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
-                (pC->m_pPto3GPPparams->PanZoomXa +
-                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb \
-                    - pC->m_pPto3GPPparams->PanZoomXa) *
-                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
-
-            if((Params.m_inputSize.m_width + Params.m_inputCoord.m_x)\
-                 > pC->m_pDecodedPlane->u_width)
-            {
-                Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width \
-                    - Params.m_inputCoord.m_x;
-            }
-
-            if((Params.m_inputSize.m_height + Params.m_inputCoord.m_y)\
-                 > pC->m_pDecodedPlane->u_height)
-            {
-                Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height\
-                     - Params.m_inputCoord.m_y;
-            }
-
-
-
-            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
-            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
-        }
-
-
-
-    /**
-        Picture rendering: Black borders*/
-
-        if(pC->m_mediaRendering == M4xVSS_kBlackBorders)
-        {
-            memset((void *)pImagePlanes[0].pac_data,Y_PLANE_BORDER_VALUE,
-                (pImagePlanes[0].u_height*pImagePlanes[0].u_stride));
-            memset((void *)pImagePlanes[1].pac_data,U_PLANE_BORDER_VALUE,
-                (pImagePlanes[1].u_height*pImagePlanes[1].u_stride));
-            memset((void *)pImagePlanes[2].pac_data,V_PLANE_BORDER_VALUE,
-                (pImagePlanes[2].u_height*pImagePlanes[2].u_stride));
-
-            /**
-            First without pan&zoom*/
-            if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
-            {
-                switch(pBasicTags.orientation)
-                {
-                default:
-                case M4COMMON_kOrientationUnknown:
-                    Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-                case M4COMMON_kOrientationTopLeft:
-                case M4COMMON_kOrientationTopRight:
-                case M4COMMON_kOrientationBottomRight:
-                case M4COMMON_kOrientationBottomLeft:
-                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
-                         /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
-                         //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
-                    {
-                        /*it is height so black borders will be on the top and on the bottom side*/
-                        Params.m_outputSize.m_width = pImagePlanes->u_width;
-                        Params.m_outputSize.m_height =
-                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height \
-                                * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
-                        /*number of lines at the top*/
-                        pImagePlanes[0].u_topleft =
-                            (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
-                                -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
-                        pImagePlanes[0].u_height = Params.m_outputSize.m_height;
-                        pImagePlanes[1].u_topleft =
-                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
-                                -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[1].u_stride;
-                        pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
-                        pImagePlanes[2].u_topleft =
-                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
-                                -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[2].u_stride;
-                        pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
-                    }
-                    else
-                    {
-                        /*it is width so black borders will be on the left and right side*/
-                        Params.m_outputSize.m_height = pImagePlanes->u_height;
-                        Params.m_outputSize.m_width =
-                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
-                                * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
-
-                        pImagePlanes[0].u_topleft =
-                            (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
-                                -Params.m_outputSize.m_width)>>1));
-                        pImagePlanes[0].u_width = Params.m_outputSize.m_width;
-                        pImagePlanes[1].u_topleft =
-                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
-                                -(Params.m_outputSize.m_width>>1)))>>1);
-                        pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
-                        pImagePlanes[2].u_topleft =
-                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
-                                -(Params.m_outputSize.m_width>>1)))>>1);
-                        pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
-                    }
-                    break;
-                case M4COMMON_kOrientationLeftTop:
-                case M4COMMON_kOrientationLeftBottom:
-                case M4COMMON_kOrientationRightTop:
-                case M4COMMON_kOrientationRightBottom:
-                        if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
-                             /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
-                             //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
-                        {
-                            /*it is height so black borders will be on the top and on
-                             the bottom side*/
-                            Params.m_outputSize.m_height = pImagePlanes->u_width;
-                            Params.m_outputSize.m_width =
-                                 (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
-                                    * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_height);
-                            /*number of lines at the top*/
-                            pImagePlanes[0].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
-                                    -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
-                            pImagePlanes[0].u_height = Params.m_outputSize.m_width;
-                            pImagePlanes[1].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
-                                    -(Params.m_outputSize.m_width>>1)))>>1)\
-                                        *pImagePlanes[1].u_stride)+1;
-                            pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
-                            pImagePlanes[2].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
-                                    -(Params.m_outputSize.m_width>>1)))>>1)\
-                                        *pImagePlanes[2].u_stride)+1;
-                            pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
-                        }
-                        else
-                        {
-                            /*it is width so black borders will be on the left and right side*/
-                            Params.m_outputSize.m_width = pImagePlanes->u_height;
-                            Params.m_outputSize.m_height =
-                                 (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
-                                     * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_width);
-
-                            pImagePlanes[0].u_topleft =
-                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
-                                    -Params.m_outputSize.m_height))>>1))+1;
-                            pImagePlanes[0].u_width = Params.m_outputSize.m_height;
-                            pImagePlanes[1].u_topleft =
-                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
-                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
-                            pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
-                            pImagePlanes[2].u_topleft =
-                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
-                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
-                            pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
-                        }
-                    break;
-                }
-            }
-
-            /**
-            Secondly with pan&zoom*/
-            else
-            {
-                switch(pBasicTags.orientation)
-                {
-                default:
-                case M4COMMON_kOrientationUnknown:
-                    Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-                case M4COMMON_kOrientationTopLeft:
-                case M4COMMON_kOrientationTopRight:
-                case M4COMMON_kOrientationBottomRight:
-                case M4COMMON_kOrientationBottomLeft:
-                    /*NO ROTATION*/
-                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
-                         /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
-                            //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
-                    {
-                        /*Black borders will be on the top and bottom of the output video*/
-                        /*Maximum output height if the input image aspect ratio is kept and if
-                        the output width is the screen width*/
-                        M4OSA_UInt32 tempOutputSizeHeight =
-                            (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
-                                 * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
-                        M4OSA_UInt32 tempInputSizeHeightMax = 0;
-                        M4OSA_UInt32 tempFinalInputHeight = 0;
-                        /*The output width is the screen width*/
-                        Params.m_outputSize.m_width = pImagePlanes->u_width;
-                        tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
-
-                        /*Maximum input height according to the maximum output height
-                        (proportional to the maximum output height)*/
-                        tempInputSizeHeightMax = (pImagePlanes->u_height\
-                            *Params.m_inputSize.m_height)/tempOutputSizeHeight;
-                        tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
-
-                        /*Check if the maximum possible input height is contained into the
-                        input image height*/
-                        if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_height)
-                        {
-                            /*The maximum possible input height is contained in the input
-                            image height,
-                            that means no black borders, the input pan zoom area will be extended
-                            so that the input AIR height will be the maximum possible*/
-                            if(((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
-                                 <= Params.m_inputCoord.m_y
-                                && ((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
-                                     <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y\
-                                         + Params.m_inputSize.m_height))
-                            {
-                                /*The input pan zoom area can be extended symmetrically on the
-                                top and bottom side*/
-                                Params.m_inputCoord.m_y -= ((tempInputSizeHeightMax \
-                                    - Params.m_inputSize.m_height)>>1);
-                            }
-                            else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
-                                -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
-                            {
-                                /*There is not enough place above the input pan zoom area to
-                                extend it symmetrically,
-                                so extend it to the maximum on the top*/
-                                Params.m_inputCoord.m_y = 0;
-                            }
-                            else
-                            {
-                                /*There is not enough place below the input pan zoom area to
-                                extend it symmetrically,
-                                so extend it to the maximum on the bottom*/
-                                Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height \
-                                    - tempInputSizeHeightMax;
-                            }
-                            /*The input height of the AIR is the maximum possible height*/
-                            Params.m_inputSize.m_height = tempInputSizeHeightMax;
-                        }
-                        else
-                        {
-                            /*The maximum possible input height is greater than the input
-                            image height,
-                            that means black borders are necessary to keep aspect ratio
-                            The input height of the AIR is all the input image height*/
-                            Params.m_outputSize.m_height =
-                                (tempOutputSizeHeight*pC->m_pDecodedPlane->u_height)\
-                                    /Params.m_inputSize.m_height;
-                            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
-                            Params.m_inputCoord.m_y = 0;
-                            Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
-                            pImagePlanes[0].u_topleft =
-                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
-                                    -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
-                            pImagePlanes[0].u_height = Params.m_outputSize.m_height;
-                            pImagePlanes[1].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
-                                    -(Params.m_outputSize.m_height>>1)))>>1)\
-                                        *pImagePlanes[1].u_stride);
-                            pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
-                            pImagePlanes[2].u_topleft =
-                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
-                                    -(Params.m_outputSize.m_height>>1)))>>1)\
-                                        *pImagePlanes[2].u_stride);
-                            pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
-                        }
-                    }
-                    else
-                    {
-                        /*Black borders will be on the left and right side of the output video*/
-                        /*Maximum output width if the input image aspect ratio is kept and if the
-                         output height is the screen height*/
-                        M4OSA_UInt32 tempOutputSizeWidth =
-                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
-                                * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
-                        M4OSA_UInt32 tempInputSizeWidthMax = 0;
-                        M4OSA_UInt32 tempFinalInputWidth = 0;
-                        /*The output height is the screen height*/
-                        Params.m_outputSize.m_height = pImagePlanes->u_height;
-                        tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
-
-                        /*Maximum input width according to the maximum output width
-                        (proportional to the maximum output width)*/
-                        tempInputSizeWidthMax =
-                             (pImagePlanes->u_width*Params.m_inputSize.m_width)\
-                                /tempOutputSizeWidth;
-                        tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
-
-                        /*Check if the maximum possible input width is contained into the input
-                         image width*/
-                        if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_width)
-                        {
-                            /*The maximum possible input width is contained in the input
-                            image width,
-                            that means no black borders, the input pan zoom area will be extended
-                            so that the input AIR width will be the maximum possible*/
-                            if(((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1) \
-                                <= Params.m_inputCoord.m_x
-                                && ((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1)\
-                                     <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
-                                        + Params.m_inputSize.m_width))
-                            {
-                                /*The input pan zoom area can be extended symmetrically on the
-                                     right and left side*/
-                                Params.m_inputCoord.m_x -= ((tempInputSizeWidthMax\
-                                     - Params.m_inputSize.m_width)>>1);
-                            }
-                            else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
-                                -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
-                            {
-                                /*There is not enough place above the input pan zoom area to
-                                    extend it symmetrically,
-                                so extend it to the maximum on the left*/
-                                Params.m_inputCoord.m_x = 0;
-                            }
-                            else
-                            {
-                                /*There is not enough place below the input pan zoom area
-                                    to extend it symmetrically,
-                                so extend it to the maximum on the right*/
-                                Params.m_inputCoord.m_x = pC->m_pDecodedPlane->u_width \
-                                    - tempInputSizeWidthMax;
-                            }
-                            /*The input width of the AIR is the maximum possible width*/
-                            Params.m_inputSize.m_width = tempInputSizeWidthMax;
-                        }
-                        else
-                        {
-                            /*The maximum possible input width is greater than the input
-                            image width,
-                            that means black borders are necessary to keep aspect ratio
-                            The input width of the AIR is all the input image width*/
-                            Params.m_outputSize.m_width =\
-                                 (tempOutputSizeWidth*pC->m_pDecodedPlane->u_width)\
-                                    /Params.m_inputSize.m_width;
-                            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
-                            Params.m_inputCoord.m_x = 0;
-                            Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
-                            pImagePlanes[0].u_topleft =
-                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
-                                    -Params.m_outputSize.m_width)>>1));
-                            pImagePlanes[0].u_width = Params.m_outputSize.m_width;
-                            pImagePlanes[1].u_topleft =
-                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
-                                    -(Params.m_outputSize.m_width>>1)))>>1);
-                            pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
-                            pImagePlanes[2].u_topleft =
-                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
-                                    -(Params.m_outputSize.m_width>>1)))>>1);
-                            pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
-                        }
-                    }
-                    break;
-                case M4COMMON_kOrientationLeftTop:
-                case M4COMMON_kOrientationLeftBottom:
-                case M4COMMON_kOrientationRightTop:
-                case M4COMMON_kOrientationRightBottom:
-                    /*ROTATION*/
-                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
-                         /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
-                         //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
-                    {
-                        /*Black borders will be on the left and right side of the output video*/
-                        /*Maximum output height if the input image aspect ratio is kept and if
-                        the output height is the screen width*/
-                        M4OSA_UInt32 tempOutputSizeHeight =
-                        (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
-                             /pC->m_pDecodedPlane->u_height);
-                        M4OSA_UInt32 tempInputSizeHeightMax = 0;
-                        M4OSA_UInt32 tempFinalInputHeight = 0;
-                        /*The output width is the screen height*/
-                        Params.m_outputSize.m_height = pImagePlanes->u_width;
-                        Params.m_outputSize.m_width= pImagePlanes->u_height;
-                        tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
-
-                        /*Maximum input height according to the maximum output height
-                             (proportional to the maximum output height)*/
-                        tempInputSizeHeightMax =
-                            (pImagePlanes->u_height*Params.m_inputSize.m_width)\
-                                /tempOutputSizeHeight;
-                        tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
-
-                        /*Check if the maximum possible input height is contained into the
-                             input image width (rotation included)*/
-                        if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_width)
-                        {
-                            /*The maximum possible input height is contained in the input
-                            image width (rotation included),
-                            that means no black borders, the input pan zoom area will be extended
-                            so that the input AIR width will be the maximum possible*/
-                            if(((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1) \
-                                <= Params.m_inputCoord.m_x
-                                && ((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1)\
-                                     <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
-                                        + Params.m_inputSize.m_width))
-                            {
-                                /*The input pan zoom area can be extended symmetrically on the
-                                 right and left side*/
-                                Params.m_inputCoord.m_x -= ((tempInputSizeHeightMax \
-                                    - Params.m_inputSize.m_width)>>1);
-                            }
-                            else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
-                                -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
-                            {
-                                /*There is not enough place on the left of the input pan
-                                zoom area to extend it symmetrically,
-                                so extend it to the maximum on the left*/
-                                Params.m_inputCoord.m_x = 0;
-                            }
-                            else
-                            {
-                                /*There is not enough place on the right of the input pan zoom
-                                 area to extend it symmetrically,
-                                so extend it to the maximum on the right*/
-                                Params.m_inputCoord.m_x =
-                                     pC->m_pDecodedPlane->u_width - tempInputSizeHeightMax;
-                            }
-                            /*The input width of the AIR is the maximum possible width*/
-                            Params.m_inputSize.m_width = tempInputSizeHeightMax;
-                        }
-                        else
-                        {
-                            /*The maximum possible input height is greater than the input
-                            image width (rotation included),
-                            that means black borders are necessary to keep aspect ratio
-                            The input width of the AIR is all the input image width*/
-                            Params.m_outputSize.m_width =
-                            (tempOutputSizeHeight*pC->m_pDecodedPlane->u_width)\
-                                /Params.m_inputSize.m_width;
-                            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
-                            Params.m_inputCoord.m_x = 0;
-                            Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
-                            pImagePlanes[0].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
-                                    -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
-                            pImagePlanes[0].u_height = Params.m_outputSize.m_width;
-                            pImagePlanes[1].u_topleft =
-                            ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
-                                -(Params.m_outputSize.m_width>>1)))>>1)\
-                                    *pImagePlanes[1].u_stride)+1;
-                            pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
-                            pImagePlanes[2].u_topleft =
-                            ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
-                                -(Params.m_outputSize.m_width>>1)))>>1)\
-                                    *pImagePlanes[2].u_stride)+1;
-                            pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
-                        }
-                    }
-                    else
-                    {
-                        /*Black borders will be on the top and bottom of the output video*/
-                        /*Maximum output width if the input image aspect ratio is kept and if
-                         the output width is the screen height*/
-                        M4OSA_UInt32 tempOutputSizeWidth =
-                        (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_height)\
-                             /pC->m_pDecodedPlane->u_width);
-                        M4OSA_UInt32 tempInputSizeWidthMax = 0;
-                        M4OSA_UInt32 tempFinalInputWidth = 0, tempFinalOutputWidth = 0;
-                        /*The output height is the screen width*/
-                        Params.m_outputSize.m_width = pImagePlanes->u_height;
-                        Params.m_outputSize.m_height= pImagePlanes->u_width;
-                        tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
-
-                        /*Maximum input width according to the maximum output width
-                         (proportional to the maximum output width)*/
-                        tempInputSizeWidthMax =
-                        (pImagePlanes->u_width*Params.m_inputSize.m_height)/tempOutputSizeWidth;
-                        tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
-
-                        /*Check if the maximum possible input width is contained into the input
-                         image height (rotation included)*/
-                        if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_height)
-                        {
-                            /*The maximum possible input width is contained in the input
-                             image height (rotation included),
-                            that means no black borders, the input pan zoom area will be extended
-                            so that the input AIR height will be the maximum possible*/
-                            if(((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1) \
-                                <= Params.m_inputCoord.m_y
-                                && ((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1)\
-                                     <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y \
-                                        + Params.m_inputSize.m_height))
-                            {
-                                /*The input pan zoom area can be extended symmetrically on
-                                the right and left side*/
-                                Params.m_inputCoord.m_y -= ((tempInputSizeWidthMax \
-                                    - Params.m_inputSize.m_height)>>1);
-                            }
-                            else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
-                                -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
-                            {
-                                /*There is not enough place on the top of the input pan zoom
-                                area to extend it symmetrically,
-                                so extend it to the maximum on the top*/
-                                Params.m_inputCoord.m_y = 0;
-                            }
-                            else
-                            {
-                                /*There is not enough place on the bottom of the input pan zoom
-                                 area to extend it symmetrically,
-                                so extend it to the maximum on the bottom*/
-                                Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height\
-                                     - tempInputSizeWidthMax;
-                            }
-                            /*The input height of the AIR is the maximum possible height*/
-                            Params.m_inputSize.m_height = tempInputSizeWidthMax;
-                        }
-                        else
-                        {
-                            /*The maximum possible input width is greater than the input\
-                             image height (rotation included),
-                            that means black borders are necessary to keep aspect ratio
-                            The input height of the AIR is all the input image height*/
-                            Params.m_outputSize.m_height =
-                                (tempOutputSizeWidth*pC->m_pDecodedPlane->u_height)\
-                                    /Params.m_inputSize.m_height;
-                            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
-                            Params.m_inputCoord.m_y = 0;
-                            Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
-                            pImagePlanes[0].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
-                                    -Params.m_outputSize.m_height))>>1))+1;
-                            pImagePlanes[0].u_width = Params.m_outputSize.m_height;
-                            pImagePlanes[1].u_topleft =
-                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
-                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
-                            pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
-                            pImagePlanes[2].u_topleft =
-                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
-                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
-                            pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
-                        }
-                    }
-                    break;
-                }
-            }
-
-            /*Width and height have to be even*/
-            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
-            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
-            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
-            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
-            pImagePlanes[0].u_width = (pImagePlanes[0].u_width>>1)<<1;
-            pImagePlanes[1].u_width = (pImagePlanes[1].u_width>>1)<<1;
-            pImagePlanes[2].u_width = (pImagePlanes[2].u_width>>1)<<1;
-            pImagePlanes[0].u_height = (pImagePlanes[0].u_height>>1)<<1;
-            pImagePlanes[1].u_height = (pImagePlanes[1].u_height>>1)<<1;
-            pImagePlanes[2].u_height = (pImagePlanes[2].u_height>>1)<<1;
-
-            /*Check that values are coherent*/
-            if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
-            {
-                Params.m_inputSize.m_width = Params.m_outputSize.m_width;
-            }
-            else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
-            {
-                Params.m_inputSize.m_height = Params.m_outputSize.m_height;
-            }
-        }
-
-        /**
-        Picture rendering: Resizing and Cropping*/
-        if(pC->m_mediaRendering != M4xVSS_kBlackBorders)
-        {
-            switch(pBasicTags.orientation)
-            {
-            default:
-            case M4COMMON_kOrientationUnknown:
-                Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
-            case M4COMMON_kOrientationTopLeft:
-            case M4COMMON_kOrientationTopRight:
-            case M4COMMON_kOrientationBottomRight:
-            case M4COMMON_kOrientationBottomLeft:
-                Params.m_outputSize.m_height = pImagePlanes->u_height;
-                Params.m_outputSize.m_width = pImagePlanes->u_width;
-                break;
-            case M4COMMON_kOrientationLeftTop:
-            case M4COMMON_kOrientationLeftBottom:
-            case M4COMMON_kOrientationRightTop:
-            case M4COMMON_kOrientationRightBottom:
-                Params.m_outputSize.m_height = pImagePlanes->u_width;
-                Params.m_outputSize.m_width = pImagePlanes->u_height;
-                break;
-            }
-        }
-
-        /**
-        Picture rendering: Cropping*/
-        if(pC->m_mediaRendering == M4xVSS_kCropping)
-        {
-            if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
-                 /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
-            {
-                M4OSA_UInt32 tempHeight = Params.m_inputSize.m_height;
-                /*height will be cropped*/
-                Params.m_inputSize.m_height =  (M4OSA_UInt32)((Params.m_outputSize.m_height \
-                    * Params.m_inputSize.m_width) /Params.m_outputSize.m_width);
-                Params.m_inputSize.m_height =  (Params.m_inputSize.m_height>>1)<<1;
-                if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
-                {
-                    Params.m_inputCoord.m_y = (M4OSA_Int32)((M4OSA_Int32)\
-                        ((pC->m_pDecodedPlane->u_height - Params.m_inputSize.m_height))>>1);
-                }
-                else
-                {
-                    Params.m_inputCoord.m_y += (M4OSA_Int32)((M4OSA_Int32)\
-                        ((tempHeight - Params.m_inputSize.m_height))>>1);
-                }
-            }
-            else
-            {
-                M4OSA_UInt32 tempWidth= Params.m_inputSize.m_width;
-                /*width will be cropped*/
-                Params.m_inputSize.m_width =  (M4OSA_UInt32)((Params.m_outputSize.m_width \
-                    * Params.m_inputSize.m_height) /Params.m_outputSize.m_height);
-                Params.m_inputSize.m_width =  (Params.m_inputSize.m_width>>1)<<1;
-                if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
-                {
-                    Params.m_inputCoord.m_x = (M4OSA_Int32)((M4OSA_Int32)\
-                        ((pC->m_pDecodedPlane->u_width - Params.m_inputSize.m_width))>>1);
-                }
-                else
-                {
-                    Params.m_inputCoord.m_x += (M4OSA_Int32)\
-                        (((M4OSA_Int32)(tempWidth - Params.m_inputSize.m_width))>>1);
-                }
-            }
-        }
-
-
-
-        /**
-         * Call AIR functions */
-        if(M4OSA_NULL == pC->m_air_context)
-        {
-            err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
-            if(err != M4NO_ERROR)
-            {
-                free(pC->m_pDecodedPlane[0].pac_data);
-                free(pC->m_pDecodedPlane);
-                pC->m_pDecodedPlane = M4OSA_NULL;
-                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
-                     Error when initializing AIR: 0x%x", err);
-                return err;
-            }
-        }
-
-        err = M4AIR_configure(pC->m_air_context, &Params);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
-                 Error when configuring AIR: 0x%x", err);
-            M4AIR_cleanUp(pC->m_air_context);
-            free(pC->m_pDecodedPlane[0].pac_data);
-            free(pC->m_pDecodedPlane);
-            pC->m_pDecodedPlane = M4OSA_NULL;
-            return err;
-        }
-
-        err = M4AIR_get(pC->m_air_context, pC->m_pDecodedPlane, pImagePlanes);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when getting AIR plane: 0x%x", err);
-            M4AIR_cleanUp(pC->m_air_context);
-            free(pC->m_pDecodedPlane[0].pac_data);
-            free(pC->m_pDecodedPlane);
-            pC->m_pDecodedPlane = M4OSA_NULL;
-            return err;
-        }
-        pImagePlanes[0] = pImagePlanes1;
-        pImagePlanes[1] = pImagePlanes2;
-        pImagePlanes[2] = pImagePlanes3;
-    }
-
-
-    /**
-     * Increment the image counter */
-    pC->m_ImageCounter++;
-
-    /**
-     * Check end of sequence */
-    last_frame_flag    = (pC->m_ImageCounter >= pC->m_NbImage);
-
-    /**
-     * Keep the picture duration */
-    *pPictureDuration = pC->m_timeDuration;
-
-    if (1 == last_frame_flag)
-    {
-        if(M4OSA_NULL != pC->m_air_context)
-        {
-            err = M4AIR_cleanUp(pC->m_air_context);
-            if(err != M4NO_ERROR)
-            {
-                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x", err);
-                return err;
-            }
-        }
-        if(M4OSA_NULL != pC->m_pDecodedPlane)
-        {
-            free(pC->m_pDecodedPlane[0].pac_data);
-            free(pC->m_pDecodedPlane);
-            pC->m_pDecodedPlane = M4OSA_NULL;
-        }
-        return M4PTO3GPP_WAR_LAST_PICTURE;
-    }
-
-    M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct: Leaving ");
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
- * @brief    This function initializes Pto3GPP with the given parameters
- * @note    The "Pictures to 3GPP" parameters are given by the internal xVSS
- *            context. This context contains a pointer on the current element
- *            of the chained list of Pto3GPP parameters.
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
-{
-    /************************************************************************/
-    /* Definitions to generate dummy AMR file used to add AMR silence in files generated
-     by Pto3GPP */
-    #define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
-    /* This constant is defined in M4VSS3GPP_InternalConfig.h */
-    extern const M4OSA_UInt8\
-         M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
-
-    /* AMR silent frame used to compute dummy AMR silence file */
-    #define M4VSS3GPP_AMR_HEADER_SIZE 6
-    const M4OSA_UInt8 M4VSS3GPP_AMR_HEADER[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
-    { 0x23, 0x21, 0x41, 0x4d, 0x52, 0x0a };
-    /************************************************************************/
-
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-    M4PTO3GPP_Context pM4PTO3GPP_Ctxt = M4OSA_NULL;
-    M4PTO3GPP_Params Params;
-     M4xVSS_PictureCallbackCtxt*    pCallBackCtxt;
-    M4OSA_Bool cmpResult=M4OSA_FALSE;
-    M4OSA_Context pDummyAMRFile;
-    M4OSA_Char out_amr[M4XVSS_MAX_PATH_LEN];
-    /*UTF conversion support*/
-    M4OSA_Char* pDecodedPath = M4OSA_NULL;
-    M4OSA_UInt32 i;
-
-    /**
-     * Create a M4PTO3GPP instance */
-    err = M4PTO3GPP_Init( &pM4PTO3GPP_Ctxt, xVSS_context->pFileReadPtr,
-         xVSS_context->pFileWritePtr);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalStartConvertPictureTo3gp returned %ld\n",err);
-        return err;
-    }
-
-    pCallBackCtxt = (M4xVSS_PictureCallbackCtxt*)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_PictureCallbackCtxt),
-         M4VS,(M4OSA_Char *) "Pto3gpp callback struct");
-    if(pCallBackCtxt == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalStartConvertPictureTo3gp");
-        return M4ERR_ALLOC;
-    }
-
-    Params.OutputVideoFrameSize = xVSS_context->pSettings->xVSS.outputVideoSize;
-    Params.OutputVideoFormat = xVSS_context->pSettings->xVSS.outputVideoFormat;
-    Params.videoProfile = xVSS_context->pSettings->xVSS.outputVideoProfile;
-    Params.videoLevel = xVSS_context->pSettings->xVSS.outputVideoLevel;
-
-    /**
-     * Generate "dummy" amr file containing silence in temporary folder */
-    M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
-    strncat((char *)out_amr, (const char *)"dummy.amr\0", 10);
-
-    /**
-     * UTF conversion: convert the temporary path into the customer format*/
-    pDecodedPath = out_amr;
-
-    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
-    {
-        M4OSA_UInt32 length = 0;
-        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
-             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalStartConvertPictureTo3gp:\
-                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
-            return err;
-        }
-        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-    }
-
-    /**
-    * End of the conversion, now use the converted path*/
-
-    err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, pDecodedPath, M4OSA_kFileWrite);
-
-    /*Commented because of the use of the UTF conversion see above*/
-/*    err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, out_amr, M4OSA_kFileWrite);
- */
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't open output dummy amr file %s,\
-             error: 0x%x\n",out_amr, err);
-        return err;
-    }
-
-    err =  xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
-        (M4OSA_Int8*)M4VSS3GPP_AMR_HEADER, M4VSS3GPP_AMR_HEADER_SIZE);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't write output dummy amr file %s,\
-             error: 0x%x\n",out_amr, err);
-        return err;
-    }
-
-    err =  xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
-         (M4OSA_Int8*)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048, M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
-            Can't write output dummy amr file %s, error: 0x%x\n",out_amr, err);
-        return err;
-    }
-
-    err =  xVSS_context->pFileWritePtr->closeWrite(pDummyAMRFile);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
-            Can't close output dummy amr file %s, error: 0x%x\n",out_amr, err);
-        return err;
-    }
-
-    /**
-     * Fill parameters for Pto3GPP with the parameters contained in the current element of the
-     * Pto3GPP parameters chained list and with default parameters */
-/*+ New Encoder bitrates */
-    if(xVSS_context->pSettings->xVSS.outputVideoBitrate == 0) {
-        Params.OutputVideoBitrate    = M4VIDEOEDITING_kVARIABLE_KBPS;
-    }
-    else {
-          Params.OutputVideoBitrate = xVSS_context->pSettings->xVSS.outputVideoBitrate;
-    }
-    M4OSA_TRACE1_1("M4xVSS_internalStartConvertPicTo3GP: video bitrate = %d",
-        Params.OutputVideoBitrate);
-/*- New Encoder bitrates */
-    Params.OutputFileMaxSize    = M4PTO3GPP_kUNLIMITED;
-    Params.pPictureCallbackFct    = M4xVSS_PictureCallbackFct;
-    Params.pPictureCallbackCtxt    = pCallBackCtxt;
-    /*FB: change to use the converted path (UTF conversion) see the conversion above*/
-    /*Fix :- Adding Audio Track in Image as input :AudioTarckFile Setting to NULL */
-    Params.pInputAudioTrackFile    = M4OSA_NULL;//(M4OSA_Void*)pDecodedPath;//out_amr;
-    Params.AudioPaddingMode        = M4PTO3GPP_kAudioPaddingMode_Loop;
-    Params.AudioFileFormat        = M4VIDEOEDITING_kFileType_AMR;
-    Params.pOutput3gppFile        = xVSS_context->pPTo3GPPcurrentParams->pFileOut;
-    Params.pTemporaryFile        = xVSS_context->pPTo3GPPcurrentParams->pFileTemp;
-    /*+PR No:  blrnxpsw#223*/
-    /*Increasing frequency of Frame, calculating Nos of Frame = duration /FPS */
-    /*Other changes made is @ M4xVSS_API.c @ line 3841 in M4xVSS_SendCommand*/
-    /*If case check for PanZoom removed */
-    Params.NbVideoFrames            = (M4OSA_UInt32)
-        (xVSS_context->pPTo3GPPcurrentParams->duration \
-            / xVSS_context->pPTo3GPPcurrentParams->framerate); /* */
-    pCallBackCtxt->m_timeDuration    = xVSS_context->pPTo3GPPcurrentParams->framerate;
-    /*-PR No:  blrnxpsw#223*/
-    pCallBackCtxt->m_ImageCounter    = 0;
-    pCallBackCtxt->m_FileIn            = xVSS_context->pPTo3GPPcurrentParams->pFileIn;
-    pCallBackCtxt->m_NbImage        = Params.NbVideoFrames;
-    pCallBackCtxt->m_pFileReadPtr    = xVSS_context->pFileReadPtr;
-    pCallBackCtxt->m_pDecodedPlane    = M4OSA_NULL;
-    pCallBackCtxt->m_pPto3GPPparams    = xVSS_context->pPTo3GPPcurrentParams;
-    pCallBackCtxt->m_air_context    = M4OSA_NULL;
-    pCallBackCtxt->m_mediaRendering = xVSS_context->pPTo3GPPcurrentParams->MediaRendering;
-
-    /**
-     * Set the input and output files */
-    err = M4PTO3GPP_Open(pM4PTO3GPP_Ctxt, &Params);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Open returned: 0x%x\n",err);
-        if(pCallBackCtxt != M4OSA_NULL)
-        {
-            free(pCallBackCtxt);
-            pCallBackCtxt = M4OSA_NULL;
-        }
-        M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
-        return err;
-    }
-
-    /**
-     * Save context to be able to call Pto3GPP step function in M4xVSS_step function */
-    xVSS_context->pM4PTO3GPP_Ctxt = pM4PTO3GPP_Ctxt;
-    xVSS_context->pCallBackCtxt = pCallBackCtxt;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
- * @brief    This function cleans up Pto3GPP
- * @note
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-    M4OSA_Char out_amr[M4XVSS_MAX_PATH_LEN];
-    /*UTF conversion support*/
-    M4OSA_Char* pDecodedPath = M4OSA_NULL;
-
-    /**
-    * Free the PTO3GPP callback context */
-    if(M4OSA_NULL != xVSS_context->pCallBackCtxt)
-    {
-        free(xVSS_context->pCallBackCtxt);
-        xVSS_context->pCallBackCtxt = M4OSA_NULL;
-    }
-
-    /**
-     * Finalize the output file */
-    err = M4PTO3GPP_Close(xVSS_context->pM4PTO3GPP_Ctxt);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_Close returned 0x%x\n",err);
-        M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
-        return err;
-    }
-
-    /**
-     * Free this M4PTO3GPP instance */
-    err = M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4PTO3GPP_CleanUp returned 0x%x\n",err);
-        return err;
-    }
-
-    /**
-     * Remove dummy.amr file */
-    M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
-    strncat((char *)out_amr, (const char *)"dummy.amr\0", 10);
-
-    /**
-     * UTF conversion: convert the temporary path into the customer format*/
-    pDecodedPath = out_amr;
-
-    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
-    {
-        M4OSA_UInt32 length = 0;
-        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
-             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalStopConvertPictureTo3gp:\
-                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
-            return err;
-        }
-        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-    }
-    /**
-    * End of the conversion, now use the decoded path*/
-    remove((const char *)pDecodedPath);
-
-    /*Commented because of the use of the UTF conversion*/
-/*    remove(out_amr);
- */
-
-    xVSS_context->pM4PTO3GPP_Ctxt = M4OSA_NULL;
-    xVSS_context->pCallBackCtxt = M4OSA_NULL;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
- * @brief    This function converts an RGB565 plane to YUV420 planar
- * @note    It is used only for framing effect
- *            It allocates output YUV planes
- * @param    framingCtx    (IN) The framing struct containing input RGB565 plane
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- * @return    M4ERR_ALLOC: Allocation error (no more memory)
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
-{
-    M4OSA_ERR err;
-
-    /**
-     * Allocate output YUV planes */
-    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane),
-         M4VS, (M4OSA_Char *)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
-    if(framingCtx->FramingYuv == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
-    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
-    framingCtx->FramingYuv[0].u_topleft = 0;
-    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
-    framingCtx->FramingYuv[0].pac_data =
-         (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc((framingCtx->FramingYuv[0].u_width\
-            *framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char *)\
-                "Alloc for the Convertion output YUV");;
-    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
-    framingCtx->FramingYuv[1].u_topleft = 0;
-    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data \
-        + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
-    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
-    framingCtx->FramingYuv[2].u_topleft = 0;
-    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
-    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data \
-        + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
-
-    /**
-     * Convert input RGB 565 to YUV 420 to be able to merge it with output video in framing
-      effect */
-    err = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV:\
-             error when converting from RGB to YUV: 0x%x\n", err);
-    }
-
-    framingCtx->duration = 0;
-    framingCtx->previousClipTime = -1;
-    framingCtx->previewOffsetClipTime = -1;
-
-    /**
-     * Only one element in the chained list (no animated image with RGB buffer...) */
-    framingCtx->pCurrent = framingCtx;
-    framingCtx->pNext = framingCtx;
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR M4xVSS_internalSetPlaneTransparent(M4OSA_UInt8* planeIn, M4OSA_UInt32 size)
-{
-    M4OSA_UInt32 i;
-    M4OSA_UInt8* plane = planeIn;
-    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
-    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
-
-    for(i=0; i<(size>>1); i++)
-    {
-        *plane++ = transparent1;
-        *plane++ = transparent2;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * prototype M4OSA_ERR M4xVSS_internalConvertARBG888toYUV420_FrammingEffect(M4OSA_Context pContext,
- *                                                M4VSS3GPP_EffectSettings* pEffect,
- *                                                M4xVSS_FramingStruct* framingCtx,
-                                                  M4VIDEOEDITING_VideoFrameSize OutputVideoResolution)
- *
- * @brief    This function converts ARGB8888 input file  to YUV420 whenused for framming effect
- * @note    The input ARGB8888 file path is contained in the pEffect structure
- *            If the ARGB8888 must be resized to fit output video size, this function
- *            will do it.
- * @param    pContext    (IN) The integrator own context
- * @param    pEffect        (IN) The effect structure containing all informations on
- *                        the file to decode, resizing ...
- * @param    framingCtx    (IN/OUT) Structure in which the output RGB will be stored
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- * @return    M4ERR_ALLOC: Allocation error (no more memory)
- * @return    M4ERR_FILE_NOT_FOUND: File not found.
- ******************************************************************************
- */
-
-
-M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
-                                                               M4VSS3GPP_EffectSettings* pEffect,
-                                                               M4xVSS_FramingStruct* framingCtx,
-                                                               M4VIDEOEDITING_VideoFrameSize\
-                                                               OutputVideoResolution)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_Context pARGBIn;
-    M4OSA_UInt32 file_size;
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_UInt32 width, height, width_out, height_out;
-    M4OSA_Void* pFile = pEffect->xVSS.pFramingFilePath;
-    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
-    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
-    /*UTF conversion support*/
-    M4OSA_Char* pDecodedPath = M4OSA_NULL;
-    M4OSA_UInt32 i = 0,j = 0;
-    M4VIFI_ImagePlane rgbPlane;
-    M4OSA_UInt32 frameSize_argb=(framingCtx->width * framingCtx->height * 4);
-    M4OSA_UInt32 frameSize;
-    M4OSA_UInt32 tempAlphaPercent = 0;
-    M4VIFI_UInt8* TempPacData = M4OSA_NULL;
-    M4OSA_UInt16 *ptr = M4OSA_NULL;
-    M4OSA_UInt32 z = 0;
-
-    M4OSA_TRACE3_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Entering ");
-
-    M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect width and height %d %d ",
-        framingCtx->width,framingCtx->height);
-
-    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb, M4VS, (M4OSA_Char*)\
-        "Image argb data");
-    if(pTmpData == M4OSA_NULL) {
-        M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
-        return M4ERR_ALLOC;
-    }
-    /**
-     * UTF conversion: convert the file path into the customer format*/
-    pDecodedPath = pFile;
-
-    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
-    {
-        M4OSA_UInt32 length = 0;
-        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) pFile,
-             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalDecodePNG:\
-                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
-            free(pTmpData);
-            pTmpData = M4OSA_NULL;
-            return err;
-        }
-        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-    }
-
-    /**
-    * End of the conversion, now use the decoded path*/
-
-     /* Open input ARGB8888 file and store it into memory */
-    err = xVSS_context->pFileReadPtr->openRead(&pARGBIn, pDecodedPath, M4OSA_kFileRead);
-
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("Can't open input ARGB8888 file %s, error: 0x%x\n",pFile, err);
-        free(pTmpData);
-        pTmpData = M4OSA_NULL;
-        return err;
-    }
-
-    err = xVSS_context->pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
-    if(err != M4NO_ERROR)
-    {
-        xVSS_context->pFileReadPtr->closeRead(pARGBIn);
-        free(pTmpData);
-        pTmpData = M4OSA_NULL;
-        return err;
-    }
-
-
-    err =  xVSS_context->pFileReadPtr->closeRead(pARGBIn);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_2("Can't close input png file %s, error: 0x%x\n",pFile, err);
-        free(pTmpData);
-        pTmpData = M4OSA_NULL;
-        return err;
-    }
-
-
-    rgbPlane.u_height = framingCtx->height;
-    rgbPlane.u_width = framingCtx->width;
-    rgbPlane.u_stride = rgbPlane.u_width*3;
-    rgbPlane.u_topleft = 0;
-
-    frameSize = (rgbPlane.u_width * rgbPlane.u_height * 3); //Size of RGB888 data
-    rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(((frameSize)+ (2 * framingCtx->width)),
-         M4VS, (M4OSA_Char*)"Image clip RGB888 data");
-    if(rgbPlane.pac_data == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
-        free(pTmpData);
-        return M4ERR_ALLOC;
-    }
-
-    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
-          Remove the alpha channel  ");
-
-    /* premultiplied alpha % on RGB */
-    for (i=0, j = 0; i < frameSize_argb; i += 4) {
-        /* this is alpha value */
-        if ((i % 4) == 0)
-        {
-            tempAlphaPercent = pTmpData[i];
-        }
-
-        /* R */
-        rgbPlane.pac_data[j] = pTmpData[i+1];
-        j++;
-
-        /* G */
-        if (tempAlphaPercent > 0) {
-            rgbPlane.pac_data[j] = pTmpData[i+2];
-            j++;
-        } else {/* In case of alpha value 0, make GREEN to 255 */
-            rgbPlane.pac_data[j] = 255; //pTmpData[i+2];
-            j++;
-        }
-
-        /* B */
-        rgbPlane.pac_data[j] = pTmpData[i+3];
-        j++;
-    }
-
-    free(pTmpData);
-    pTmpData = M4OSA_NULL;
-
-    /* convert RGB888 to RGB565 */
-
-    /* allocate temp RGB 565 buffer */
-    TempPacData = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize +
-                       (4 * (framingCtx->width + framingCtx->height + 1)),
-                        M4VS, (M4OSA_Char*)"Image clip RGB565 data");
-    if (TempPacData == M4OSA_NULL) {
-        M4OSA_TRACE1_0("Failed to allocate memory for Image clip RGB565 data");
-        free(rgbPlane.pac_data);
-        return M4ERR_ALLOC;
-    }
-
-    ptr = (M4OSA_UInt16 *)TempPacData;
-    z = 0;
-
-    for (i = 0; i < j ; i += 3)
-    {
-        ptr[z++] = PACK_RGB565(0,   rgbPlane.pac_data[i],
-                                    rgbPlane.pac_data[i+1],
-                                    rgbPlane.pac_data[i+2]);
-    }
-
-    /* free the RBG888 and assign RGB565 */
-    free(rgbPlane.pac_data);
-    rgbPlane.pac_data = TempPacData;
-
-    /**
-     * Check if output sizes are odd */
-    if(rgbPlane.u_height % 2 != 0)
-    {
-        M4VIFI_UInt8* output_pac_data = rgbPlane.pac_data;
-        M4OSA_UInt32 i;
-        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
-             output height is odd  ");
-        output_pac_data +=rgbPlane.u_width * rgbPlane.u_height*2;
-
-        for(i=0;i<rgbPlane.u_width;i++)
-        {
-            *output_pac_data++ = transparent1;
-            *output_pac_data++ = transparent2;
-        }
-
-        /**
-         * We just add a white line to the PNG that will be transparent */
-        rgbPlane.u_height++;
-    }
-    if(rgbPlane.u_width % 2 != 0)
-    {
-        /**
-         * We add a new column of white (=transparent), but we need to parse all RGB lines ... */
-        M4OSA_UInt32 i;
-        M4VIFI_UInt8* newRGBpac_data;
-        M4VIFI_UInt8* output_pac_data, *input_pac_data;
-
-        rgbPlane.u_width++;
-        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
-             output width is odd  ");
-        /**
-         * We need to allocate a new RGB output buffer in which all decoded data
-          + white line will be copied */
-        newRGBpac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(rgbPlane.u_height*rgbPlane.u_width*2\
-            *sizeof(M4VIFI_UInt8), M4VS, (M4OSA_Char *)"New Framing GIF Output pac_data RGB");
-
-        if(newRGBpac_data == M4OSA_NULL)
-        {
-            M4OSA_TRACE1_0("Allocation error in \
-                M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
-            free(rgbPlane.pac_data);
-            return M4ERR_ALLOC;
-        }
-
-        output_pac_data= newRGBpac_data;
-        input_pac_data = rgbPlane.pac_data;
-
-        for(i=0;i<rgbPlane.u_height;i++)
-        {
-            memcpy((void *)output_pac_data, (void *)input_pac_data,
-                 (rgbPlane.u_width-1)*2);
-
-            output_pac_data += ((rgbPlane.u_width-1)*2);
-            /* Put the pixel to transparency color */
-            *output_pac_data++ = transparent1;
-            *output_pac_data++ = transparent2;
-
-            input_pac_data += ((rgbPlane.u_width-1)*2);
-        }
-        free(rgbPlane.pac_data);
-        rgbPlane.pac_data = newRGBpac_data;
-    }
-
-    /* reset stride */
-    rgbPlane.u_stride = rgbPlane.u_width*2;
-
-    /**
-     * Initialize chained list parameters */
-    framingCtx->duration = 0;
-    framingCtx->previousClipTime = -1;
-    framingCtx->previewOffsetClipTime = -1;
-
-    /**
-     * Only one element in the chained list (no animated image ...) */
-    framingCtx->pCurrent = framingCtx;
-    framingCtx->pNext = framingCtx;
-
-    /**
-     * Get output width/height */
-     switch(OutputVideoResolution)
-    //switch(xVSS_context->pSettings->xVSS.outputVideoSize)
-    {
-    case M4VIDEOEDITING_kSQCIF:
-        width_out = 128;
-        height_out = 96;
-        break;
-    case M4VIDEOEDITING_kQQVGA:
-        width_out = 160;
-        height_out = 120;
-        break;
-    case M4VIDEOEDITING_kQCIF:
-        width_out = 176;
-        height_out = 144;
-        break;
-    case M4VIDEOEDITING_kQVGA:
-        width_out = 320;
-        height_out = 240;
-        break;
-    case M4VIDEOEDITING_kCIF:
-        width_out = 352;
-        height_out = 288;
-        break;
-    case M4VIDEOEDITING_kVGA:
-        width_out = 640;
-        height_out = 480;
-        break;
-    case M4VIDEOEDITING_kWVGA:
-        width_out = 800;
-        height_out = 480;
-        break;
-    case M4VIDEOEDITING_kNTSC:
-        width_out = 720;
-        height_out = 480;
-        break;
-    case M4VIDEOEDITING_k640_360:
-        width_out = 640;
-        height_out = 360;
-        break;
-    case M4VIDEOEDITING_k854_480:
-        // StageFright encoders require %16 resolution
-        width_out = M4ENCODER_854_480_Width;
-        height_out = 480;
-        break;
-    case M4VIDEOEDITING_k1280_720:
-        width_out = 1280;
-        height_out = 720;
-        break;
-    case M4VIDEOEDITING_k1080_720:
-        // StageFright encoders require %16 resolution
-        width_out = M4ENCODER_1080_720_Width;
-        height_out = 720;
-        break;
-    case M4VIDEOEDITING_k960_720:
-        width_out = 960;
-        height_out = 720;
-        break;
-    case M4VIDEOEDITING_k1920_1080:
-        width_out = 1920;
-        height_out = M4ENCODER_1920_1080_Height;
-        break;
-    /**
-     * If output video size is not given, we take QCIF size,
-     * should not happen, because already done in M4xVSS_sendCommand */
-    default:
-        width_out = 176;
-        height_out = 144;
-        break;
-    }
-
-    /**
-     * Allocate output planes structures */
-    framingCtx->FramingRgb = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(sizeof(M4VIFI_ImagePlane), M4VS,
-         (M4OSA_Char *)"Framing Output plane RGB");
-    if(framingCtx->FramingRgb == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
-        return M4ERR_ALLOC;
-    }
-    /**
-     * Resize RGB if needed */
-    if((pEffect->xVSS.bResize) &&
-         (rgbPlane.u_width != width_out || rgbPlane.u_height != height_out))
-    {
-        width = width_out;
-        height = height_out;
-
-        M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
-             New Width and height %d %d  ",width,height);
-
-        framingCtx->FramingRgb->u_height = height_out;
-        framingCtx->FramingRgb->u_width = width_out;
-        framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*2;
-        framingCtx->FramingRgb->u_topleft = 0;
-
-        framingCtx->FramingRgb->pac_data =
-             (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(framingCtx->FramingRgb->u_height*framingCtx->\
-                FramingRgb->u_width*2*sizeof(M4VIFI_UInt8), M4VS,
-                  (M4OSA_Char *)"Framing Output pac_data RGB");
-
-        if(framingCtx->FramingRgb->pac_data == M4OSA_NULL)
-        {
-            M4OSA_TRACE1_0("Allocation error in \
-                M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
-            free(framingCtx->FramingRgb);
-            free(rgbPlane.pac_data);
-            return M4ERR_ALLOC;
-        }
-
-        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:  Resizing Needed ");
-        M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
-              rgbPlane.u_height & rgbPlane.u_width %d %d",rgbPlane.u_height,rgbPlane.u_width);
-
-        //err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
-        err = M4VIFI_ResizeBilinearRGB565toRGB565(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
-
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect :\
-                when resizing RGB plane: 0x%x\n", err);
-            return err;
-        }
-
-        if(rgbPlane.pac_data != M4OSA_NULL)
-        {
-            free(rgbPlane.pac_data);
-            rgbPlane.pac_data = M4OSA_NULL;
-        }
-    }
-    else
-    {
-
-        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
-              Resizing Not Needed ");
-
-        width = rgbPlane.u_width;
-        height = rgbPlane.u_height;
-        framingCtx->FramingRgb->u_height = height;
-        framingCtx->FramingRgb->u_width = width;
-        framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*2;
-        framingCtx->FramingRgb->u_topleft = 0;
-        framingCtx->FramingRgb->pac_data = rgbPlane.pac_data;
-    }
-
-
-    if(pEffect->xVSS.bResize)
-    {
-        /**
-         * Force topleft to 0 for pure framing effect */
-        framingCtx->topleft_x = 0;
-        framingCtx->topleft_y = 0;
-    }
-
-
-    /**
-     * Convert  RGB output to YUV 420 to be able to merge it with output video in framing
-     effect */
-    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane), M4VS,
-         (M4OSA_Char *)"Framing Output plane YUV");
-    if(framingCtx->FramingYuv == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
-        free(framingCtx->FramingRgb->pac_data);
-        return M4ERR_ALLOC;
-    }
-
-    // Alloc for Y, U and V planes
-    framingCtx->FramingYuv[0].u_width = ((width+1)>>1)<<1;
-    framingCtx->FramingYuv[0].u_height = ((height+1)>>1)<<1;
-    framingCtx->FramingYuv[0].u_topleft = 0;
-    framingCtx->FramingYuv[0].u_stride = ((width+1)>>1)<<1;
-    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc
-        ((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height), M4VS,
-            (M4OSA_Char *)"Alloc for the output Y");
-    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
-        free(framingCtx->FramingYuv);
-        free(framingCtx->FramingRgb->pac_data);
-        return M4ERR_ALLOC;
-    }
-    framingCtx->FramingYuv[1].u_width = (((width+1)>>1)<<1)>>1;
-    framingCtx->FramingYuv[1].u_height = (((height+1)>>1)<<1)>>1;
-    framingCtx->FramingYuv[1].u_topleft = 0;
-    framingCtx->FramingYuv[1].u_stride = (((width+1)>>1)<<1)>>1;
-
-
-    framingCtx->FramingYuv[1].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
-        framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height, M4VS,
-        (M4OSA_Char *)"Alloc for the output U");
-    if (framingCtx->FramingYuv[1].pac_data == M4OSA_NULL) {
-        free(framingCtx->FramingYuv[0].pac_data);
-        free(framingCtx->FramingYuv);
-        free(framingCtx->FramingRgb->pac_data);
-        return M4ERR_ALLOC;
-    }
-
-    framingCtx->FramingYuv[2].u_width = (((width+1)>>1)<<1)>>1;
-    framingCtx->FramingYuv[2].u_height = (((height+1)>>1)<<1)>>1;
-    framingCtx->FramingYuv[2].u_topleft = 0;
-    framingCtx->FramingYuv[2].u_stride = (((width+1)>>1)<<1)>>1;
-
-
-    framingCtx->FramingYuv[2].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
-        framingCtx->FramingYuv[2].u_width * framingCtx->FramingYuv[0].u_height, M4VS,
-        (M4OSA_Char *)"Alloc for the  output V");
-    if (framingCtx->FramingYuv[2].pac_data == M4OSA_NULL) {
-        free(framingCtx->FramingYuv[1].pac_data);
-        free(framingCtx->FramingYuv[0].pac_data);
-        free(framingCtx->FramingYuv);
-        free(framingCtx->FramingRgb->pac_data);
-        return M4ERR_ALLOC;
-    }
-
-    M4OSA_TRACE3_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
-        convert RGB to YUV ");
-
-    //err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb,  framingCtx->FramingYuv);
-    err = M4VIFI_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb,  framingCtx->FramingYuv);
-
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("SPS png: error when converting from RGB to YUV: 0x%x\n", err);
-    }
-    M4OSA_TRACE3_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:  Leaving ");
-    return err;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
- *
- * @brief    This function prepares VSS for editing
- * @note    It also set special xVSS effect as external effects for the VSS
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- * @return    M4ERR_ALLOC: Allocation error (no more memory)
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4VSS3GPP_EditContext pVssCtxt;
-    M4OSA_UInt32 i,j;
-    M4OSA_ERR err;
-
-    /**
-     * Create a VSS 3GPP edition instance */
-    err = M4VSS3GPP_editInit( &pVssCtxt, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile: M4VSS3GPP_editInit returned 0x%x\n",
-            err);
-        M4VSS3GPP_editCleanUp(pVssCtxt);
-        /**
-         * Set the VSS context to NULL */
-        xVSS_context->pCurrentEditContext = M4OSA_NULL;
-        return err;
-    }
-
-        M4VSS3GPP_InternalEditContext* pVSSContext =
-            (M4VSS3GPP_InternalEditContext*)pVssCtxt;
-        pVSSContext->xVSS.outputVideoFormat =
-            xVSS_context->pSettings->xVSS.outputVideoFormat;
-        pVSSContext->xVSS.outputVideoSize =
-            xVSS_context->pSettings->xVSS.outputVideoSize ;
-        pVSSContext->xVSS.outputAudioFormat =
-            xVSS_context->pSettings->xVSS.outputAudioFormat;
-        pVSSContext->xVSS.outputAudioSamplFreq =
-            xVSS_context->pSettings->xVSS.outputAudioSamplFreq;
-        pVSSContext->xVSS.outputVideoBitrate =
-            xVSS_context->pSettings->xVSS.outputVideoBitrate ;
-        pVSSContext->xVSS.outputAudioBitrate =
-            xVSS_context->pSettings->xVSS.outputAudioBitrate ;
-        pVSSContext->xVSS.bAudioMono =
-            xVSS_context->pSettings->xVSS.bAudioMono;
-        pVSSContext->xVSS.outputVideoProfile =
-            xVSS_context->pSettings->xVSS.outputVideoProfile;
-        pVSSContext->xVSS.outputVideoLevel =
-            xVSS_context->pSettings->xVSS.outputVideoLevel;
-    /* In case of MMS use case, we fill directly into the VSS context the targeted bitrate */
-    if(xVSS_context->targetedBitrate != 0)
-    {
-        M4VSS3GPP_InternalEditContext* pVSSContext = (M4VSS3GPP_InternalEditContext*)pVssCtxt;
-
-        pVSSContext->bIsMMS = M4OSA_TRUE;
-        pVSSContext->uiMMSVideoBitrate = xVSS_context->targetedBitrate;
-        pVSSContext->MMSvideoFramerate = xVSS_context->pSettings->videoFrameRate;
-    }
-
-    /*Warning: since the adding of the UTF conversion, pSettings has been changed in the next
-    part in  pCurrentEditSettings (there is a specific current editing structure for the saving,
-     as for the preview)*/
-
-    /**
-     * Set the external video effect functions, for saving mode (to be moved to
-      M4xVSS_saveStart() ?)*/
-    for (i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
-    {
-        for (j=0; j<xVSS_context->pCurrentEditSettings->nbEffects; j++)
-        {
-            if (M4xVSS_kVideoEffectType_BlackAndWhite ==
-            xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                // (M4OSA_Void*)M4xVSS_kVideoEffectType_BlackAndWhite;
-                /*commented FB*/
-                /**
-                 * We do not need to set the color context, it is already set
-                 during sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Pink ==
-                xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Pink; /**< we don't
-                // use any function context */
-                /*commented FB*/
-                /**
-                 * We do not need to set the color context,
-                  it is already set during sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Green ==
-                 xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                    M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                    // (M4OSA_Void*)M4xVSS_kVideoEffectType_Green;
-                     /**< we don't use any function context */
-                /*commented FB*/
-                /**
-                 * We do not need to set the color context, it is already set during
-                  sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Sepia ==
-                 xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Sepia;
-                /**< we don't use any function context */
-                /*commented FB*/
-                /**
-                 * We do not need to set the color context, it is already set during
-                 sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Fifties ==
-             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectFifties;
-                /**
-                 * We do not need to set the framing context, it is already set during
-                 sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Negative ==
-             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Negative;
-                 /**< we don't use any function context */
-                /*commented FB*/
-                /**
-                 * We do not need to set the color context, it is already set during
-                  sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Framing ==
-             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectFraming;
-                /**
-                 * We do not need to set the framing context, it is already set during
-                 sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_ZoomIn ==
-             xVSS_context->pSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectZoom;
-                xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
-                 (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomIn; /**< we don't use any
-                 function context */
-            }
-            if (M4xVSS_kVideoEffectType_ZoomOut ==
-             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectZoom;
-                xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
-                 (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomOut; /**< we don't use any
-                 function context */
-            }
-            if (M4xVSS_kVideoEffectType_ColorRGB16 ==
-             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
-                /**< we don't use any function context */
-                /**
-                 * We do not need to set the color context, it is already set during
-                 sendCommand function */
-            }
-            if (M4xVSS_kVideoEffectType_Gradient ==
-             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
-            {
-                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
-                 M4VSS3GPP_externalVideoEffectColor;
-                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
-                // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
-                /**< we don't use any function context */
-                /**
-                 * We do not need to set the color context, it is already set during
-                 sendCommand function */
-            }
-
-        }
-    }
-
-    /**
-     * Open the VSS 3GPP */
-    err = M4VSS3GPP_editOpen(pVssCtxt, xVSS_context->pCurrentEditSettings);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
-             M4VSS3GPP_editOpen returned 0x%x\n",err);
-        M4VSS3GPP_editCleanUp(pVssCtxt);
-        /**
-         * Set the VSS context to NULL */
-        xVSS_context->pCurrentEditContext = M4OSA_NULL;
-        return err;
-    }
-
-    /**
-     * Save VSS context to be able to close / free VSS later */
-    xVSS_context->pCurrentEditContext = pVssCtxt;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
- *
- * @brief    This function cleans up VSS
- * @note
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
-    M4OSA_ERR err;
-
-    if(xVSS_context->pCurrentEditContext != M4OSA_NULL)
-    {
-        /**
-         * Close the VSS 3GPP */
-        err = M4VSS3GPP_editClose(pVssCtxt);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile:\
-                 M4VSS3GPP_editClose returned 0x%x\n",err);
-            M4VSS3GPP_editCleanUp(pVssCtxt);
-            /**
-             * Set the VSS context to NULL */
-            xVSS_context->pCurrentEditContext = M4OSA_NULL;
-            return err;
-        }
-
-        /**
-         * Free this VSS3GPP edition instance */
-        err = M4VSS3GPP_editCleanUp(pVssCtxt);
-        /**
-         * Set the VSS context to NULL */
-        xVSS_context->pCurrentEditContext = M4OSA_NULL;
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile: \
-                M4VSS3GPP_editCleanUp returned 0x%x\n",err);
-            return err;
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
- *
- * @brief    This function prepares VSS for audio mixing
- * @note    It takes its parameters from the BGM settings in the xVSS internal context
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- * @return    M4ERR_ALLOC: Allocation error (no more memory)
- ******************************************************************************
- */
-/***
- * FB: the function has been modified since the structure used for the saving is now the
- *  pCurrentEditSettings and not the pSettings
- * This change has been added for the UTF support
- * All the "xVSS_context->pSettings" has been replaced by "xVSS_context->pCurrentEditSettings"
- ***/
-M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4VSS3GPP_AudioMixingSettings* pAudioMixSettings;
-    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt;
-    M4OSA_ERR err;
-    M4VIDEOEDITING_ClipProperties fileProperties;
-
-    /**
-     * Allocate audio mixing settings structure and fill it with BGM parameters */
-    pAudioMixSettings = (M4VSS3GPP_AudioMixingSettings*)M4OSA_32bitAlignedMalloc
-        (sizeof(M4VSS3GPP_AudioMixingSettings), M4VS, (M4OSA_Char *)"pAudioMixSettings");
-    if(pAudioMixSettings == M4OSA_NULL)
-    {
-        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalGenerateAudioMixFile");
-        return M4ERR_ALLOC;
-    }
-
-    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType ==
-         M4VIDEOEDITING_kFileType_3GPP)
-    {
-        err = M4xVSS_internalGetProperties((M4OSA_Context)xVSS_context,
-             (M4OSA_Char*)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile,
-                 &fileProperties);
-        if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
-                 impossible to retrieve audio BGM properties ->\
-                     reencoding audio background music", err);
-            fileProperties.AudioStreamType =
-                 xVSS_context->pCurrentEditSettings->xVSS.outputAudioFormat+1;
-                  /* To force BGM encoding */
-        }
-    }
-
-    pAudioMixSettings->bRemoveOriginal = M4OSA_FALSE;
-    pAudioMixSettings->AddedAudioFileType =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType;
-    pAudioMixSettings->pAddedAudioTrackFile =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile;
-    pAudioMixSettings->uiAddVolume =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume;
-
-    pAudioMixSettings->outputAudioFormat = xVSS_context->pSettings->xVSS.outputAudioFormat;
-    pAudioMixSettings->outputASF = xVSS_context->pSettings->xVSS.outputAudioSamplFreq;
-    pAudioMixSettings->outputAudioBitrate = xVSS_context->pSettings->xVSS.outputAudioBitrate;
-    pAudioMixSettings->uiSamplingFrequency =
-     xVSS_context->pSettings->xVSS.pBGMtrack->uiSamplingFrequency;
-    pAudioMixSettings->uiNumChannels = xVSS_context->pSettings->xVSS.pBGMtrack->uiNumChannels;
-
-    pAudioMixSettings->b_DuckingNeedeed =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->b_DuckingNeedeed;
-    pAudioMixSettings->fBTVolLevel =
-     (M4OSA_Float )xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume/100;
-    pAudioMixSettings->InDucking_threshold =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->InDucking_threshold;
-    pAudioMixSettings->InDucking_lowVolume =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->lowVolume/100;
-    pAudioMixSettings->fPTVolLevel =
-     (M4OSA_Float)xVSS_context->pSettings->PTVolLevel/100;
-    pAudioMixSettings->bLoop = xVSS_context->pSettings->xVSS.pBGMtrack->bLoop;
-
-    if(xVSS_context->pSettings->xVSS.bAudioMono)
-    {
-        pAudioMixSettings->outputNBChannels = 1;
-    }
-    else
-    {
-        pAudioMixSettings->outputNBChannels = 2;
-    }
-
-    /**
-     * Fill audio mix settings with BGM parameters */
-    pAudioMixSettings->uiBeginLoop =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiBeginLoop;
-    pAudioMixSettings->uiEndLoop =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiEndLoop;
-    pAudioMixSettings->uiAddCts =
-     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddCts;
-
-    /**
-     * Output file of the audio mixer will be final file (audio mixing is the last step) */
-    pAudioMixSettings->pOutputClipFile = xVSS_context->pOutputFile;
-    pAudioMixSettings->pTemporaryFile = xVSS_context->pTemporaryFile;
-
-    /**
-     * Input file of the audio mixer is a temporary file containing all audio/video editions */
-    pAudioMixSettings->pOriginalClipFile = xVSS_context->pCurrentEditSettings->pOutputFile;
-
-    /**
-     * Save audio mixing settings pointer to be able to free it in
-     M4xVSS_internalCloseAudioMixedFile function */
-    xVSS_context->pAudioMixSettings = pAudioMixSettings;
-
-    /**
-     * Create a VSS 3GPP audio mixing instance */
-    err = M4VSS3GPP_audioMixingInit(&pAudioMixingCtxt, pAudioMixSettings,
-         xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
-
-    /**
-     * Save audio mixing context to be able to call audio mixing step function in
-      M4xVSS_step function */
-    xVSS_context->pAudioMixContext = pAudioMixingCtxt;
-
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
-             M4VSS3GPP_audioMixingInit returned 0x%x\n",err);
-        //M4VSS3GPP_audioMixingCleanUp(pAudioMixingCtxt);
-        return err;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
- *
- * @brief    This function cleans up VSS for audio mixing
- * @note
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-
-    /**
-     * Free this VSS3GPP audio mixing instance */
-    if(xVSS_context->pAudioMixContext != M4OSA_NULL)
-    {
-        err = M4VSS3GPP_audioMixingCleanUp(xVSS_context->pAudioMixContext);
-        if (err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalCloseAudioMixedFile:\
-                 M4VSS3GPP_audioMixingCleanUp returned 0x%x\n",err);
-            return err;
-        }
-    }
-
-    /**
-     * Free VSS audio mixing settings */
-    if(xVSS_context->pAudioMixSettings != M4OSA_NULL)
-    {
-        free(xVSS_context->pAudioMixSettings);
-        xVSS_context->pAudioMixSettings = M4OSA_NULL;
-    }
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
- *
- * @brief    This function cleans up preview edition structure used to generate
- *            preview.3gp file given to the VPS
- * @note    It also free the preview structure given to the VPS
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_UInt8 i;
-
-    /**
-     * Free clip/transition settings */
-    for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
-    {
-        M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
-
-        free((xVSS_context->pCurrentEditSettings->pClipList[i]));
-        xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
-
-        /**
-         * Because there is 1 less transition than clip number */
-        if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
-        {
-            free((xVSS_context->pCurrentEditSettings->pTransitionList[i]));
-            xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
-        }
-    }
-
-    /**
-     * Free clip/transition list */
-    if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
-    {
-        free((xVSS_context->pCurrentEditSettings->pClipList));
-        xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
-    }
-    if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
-    {
-        free((xVSS_context->pCurrentEditSettings->pTransitionList));
-        xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
-    }
-
-    /**
-     * Free output preview file path */
-    if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
-    {
-        free(xVSS_context->pCurrentEditSettings->pOutputFile);
-        xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
-    }
-
-    /**
-     * Free temporary preview file path */
-    if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
-    {
-        remove((const char *)xVSS_context->pCurrentEditSettings->pTemporaryFile);
-        free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
-        xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
-    }
-
-    /**
-     * Free "local" BGM settings */
-    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
-    {
-        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
-        {
-            free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
-            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
-        }
-        free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
-        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
-    }
-
-    /**
-     * Free current edit settings structure */
-    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
-    {
-        free(xVSS_context->pCurrentEditSettings);
-        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
-    }
-
-    /**
-     * Free preview effects given to application */
-    if(M4OSA_NULL != xVSS_context->pPreviewSettings->Effects)
-    {
-        free(xVSS_context->pPreviewSettings->Effects);
-        xVSS_context->pPreviewSettings->Effects = M4OSA_NULL;
-        xVSS_context->pPreviewSettings->nbEffects = 0;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
- *
- * @brief    This function cleans up saving edition structure used to generate
- *            output.3gp file given to the VPS
- * @note
- * @param    pContext    (IN) The integrator own context
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_UInt8 i;
-
-    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
-    {
-        /**
-         * Free clip/transition settings */
-        for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
-        {
-            M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
-
-            free((xVSS_context->pCurrentEditSettings->pClipList[i]));
-            xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
-
-            /**
-             * Because there is 1 less transition than clip number */
-            if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
-            {
-                free(\
-                    (xVSS_context->pCurrentEditSettings->pTransitionList[i]));
-                xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
-            }
-        }
-
-        /**
-         * Free clip/transition list */
-        if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
-        {
-            free((xVSS_context->pCurrentEditSettings->pClipList));
-            xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
-        }
-        if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
-        {
-            free((xVSS_context->pCurrentEditSettings->pTransitionList));
-            xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
-        }
-
-        if(xVSS_context->pCurrentEditSettings->Effects != M4OSA_NULL)
-        {
-            free((xVSS_context->pCurrentEditSettings->Effects));
-            xVSS_context->pCurrentEditSettings->Effects = M4OSA_NULL;
-            xVSS_context->pCurrentEditSettings->nbEffects = 0;
-        }
-
-        /**
-         * Free output saving file path */
-        if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
-        {
-            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
-            {
-                remove((const char *)xVSS_context->pCurrentEditSettings->pOutputFile);
-                free(xVSS_context->pCurrentEditSettings->pOutputFile);
-            }
-            if(xVSS_context->pOutputFile != M4OSA_NULL)
-            {
-                free(xVSS_context->pOutputFile);
-                xVSS_context->pOutputFile = M4OSA_NULL;
-            }
-            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
-            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
-        }
-
-        /**
-         * Free temporary saving file path */
-        if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
-        {
-            remove((const char *)xVSS_context->pCurrentEditSettings->pTemporaryFile);
-            free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
-            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
-        }
-
-        /**
-         * Free "local" BGM settings */
-        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
-        {
-            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
-            {
-                free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
-                xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
-            }
-            free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
-            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
-        }
-
-        /**
-         * Free current edit settings structure */
-        free(xVSS_context->pCurrentEditSettings);
-        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_freeSettings(M4OSA_Context pContext)
- *
- * @brief    This function cleans up an M4VSS3GPP_EditSettings structure
- * @note
- * @param    pSettings    (IN) Pointer on M4VSS3GPP_EditSettings structure to free
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings)
-{
-    M4OSA_UInt8 i,j;
-
-    /**
-     * For each clip ... */
-    for(i=0; i<pSettings->uiClipNumber; i++)
-    {
-        /**
-         * ... free clip settings */
-        if(pSettings->pClipList[i] != M4OSA_NULL)
-        {
-            M4xVSS_FreeClipSettings(pSettings->pClipList[i]);
-
-            free((pSettings->pClipList[i]));
-            pSettings->pClipList[i] = M4OSA_NULL;
-        }
-
-        /**
-         * ... free transition settings */
-        if(i < pSettings->uiClipNumber-1) /* Because there is 1 less transition than clip number */
-        {
-            if(pSettings->pTransitionList[i] != M4OSA_NULL)
-            {
-                switch (pSettings->pTransitionList[i]->VideoTransitionType)
-                {
-                    case M4xVSS_kVideoTransitionType_AlphaMagic:
-
-                        /**
-                         * In case of Alpha Magic transition,
-                          some extra parameters need to be freed */
-                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt\
-                             != M4OSA_NULL)
-                        {
-                            free((((M4xVSS_internal_AlphaMagicSettings*)\
-                                pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
-                                    pPlane->pac_data));
-                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i\
-                                ]->pExtVideoTransitionFctCtxt)->pPlane->pac_data = M4OSA_NULL;
-
-                            free((((M4xVSS_internal_AlphaMagicSettings*)\
-                                pSettings->pTransitionList[i]->\
-                                    pExtVideoTransitionFctCtxt)->pPlane));
-                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i]\
-                                ->pExtVideoTransitionFctCtxt)->pPlane = M4OSA_NULL;
-
-                            free((pSettings->pTransitionList[i]->\
-                                pExtVideoTransitionFctCtxt));
-                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
-
-                            for(j=i+1;j<pSettings->uiClipNumber-1;j++)
-                            {
-                                if(pSettings->pTransitionList[j] != M4OSA_NULL)
-                                {
-                                    if(pSettings->pTransitionList[j]->VideoTransitionType ==
-                                     M4xVSS_kVideoTransitionType_AlphaMagic)
-                                    {
-                                        M4OSA_UInt32 pCmpResult=0;
-                                        pCmpResult = strcmp((const char *)pSettings->pTransitionList[i]->\
-                                            xVSS.transitionSpecific.pAlphaMagicSettings->\
-                                                pAlphaFilePath,
-                                                (const char *)pSettings->pTransitionList[j]->\
-                                                xVSS.transitionSpecific.pAlphaMagicSettings->\
-                                                pAlphaFilePath);
-                                        if(pCmpResult == 0)
-                                        {
-                                            /* Free extra internal alpha magic structure and put
-                                            it to NULL to avoid refreeing it */
-                                            free((pSettings->\
-                                                pTransitionList[j]->pExtVideoTransitionFctCtxt));
-                                            pSettings->pTransitionList[j]->\
-                                                pExtVideoTransitionFctCtxt = M4OSA_NULL;
-                                        }
-                                    }
-                                }
-                            }
-                        }
-
-                        if(pSettings->pTransitionList[i]->\
-                            xVSS.transitionSpecific.pAlphaMagicSettings != M4OSA_NULL)
-                        {
-                            if(pSettings->pTransitionList[i]->\
-                                xVSS.transitionSpecific.pAlphaMagicSettings->\
-                                    pAlphaFilePath != M4OSA_NULL)
-                            {
-                                free(pSettings->\
-                                    pTransitionList[i]->\
-                                        xVSS.transitionSpecific.pAlphaMagicSettings->\
-                                            pAlphaFilePath);
-                                pSettings->pTransitionList[i]->\
-                                    xVSS.transitionSpecific.pAlphaMagicSettings->\
-                                        pAlphaFilePath = M4OSA_NULL;
-                            }
-                            free(pSettings->pTransitionList[i]->\
-                                xVSS.transitionSpecific.pAlphaMagicSettings);
-                            pSettings->pTransitionList[i]->\
-                                xVSS.transitionSpecific.pAlphaMagicSettings = M4OSA_NULL;
-
-                        }
-
-                    break;
-
-
-                    case M4xVSS_kVideoTransitionType_SlideTransition:
-                        if (M4OSA_NULL != pSettings->pTransitionList[i]->\
-                            xVSS.transitionSpecific.pSlideTransitionSettings)
-                        {
-                            free(pSettings->pTransitionList[i]->\
-                                xVSS.transitionSpecific.pSlideTransitionSettings);
-                            pSettings->pTransitionList[i]->\
-                                xVSS.transitionSpecific.pSlideTransitionSettings = M4OSA_NULL;
-                        }
-                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt != M4OSA_NULL)
-                        {
-                            free((pSettings->pTransitionList[i]->\
-                                pExtVideoTransitionFctCtxt));
-                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
-                        }
-                    break;
-                                        default:
-                    break;
-
-                }
-                /**
-                 * Free transition settings structure */
-                free((pSettings->pTransitionList[i]));
-                pSettings->pTransitionList[i] = M4OSA_NULL;
-            }
-        }
-    }
-
-    /**
-     * Free clip list */
-    if(pSettings->pClipList != M4OSA_NULL)
-    {
-        free((pSettings->pClipList));
-        pSettings->pClipList = M4OSA_NULL;
-    }
-
-    /**
-     * Free transition list */
-    if(pSettings->pTransitionList != M4OSA_NULL)
-    {
-        free((pSettings->pTransitionList));
-        pSettings->pTransitionList = M4OSA_NULL;
-    }
-
-    /**
-     * RC: Free effects list */
-    if(pSettings->Effects != M4OSA_NULL)
-    {
-        for(i=0; i<pSettings->nbEffects; i++)
-        {
-            /**
-             * For each clip, free framing structure if needed */
-            if(pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Framing
-                || pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Text)
-            {
-#ifdef DECODE_GIF_ON_SAVING
-                M4xVSS_FramingContext* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
-#else
-                M4xVSS_FramingStruct* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
-                M4xVSS_FramingStruct* framingCtx_save;
-                M4xVSS_Framing3102Struct* framingCtx_first = framingCtx;
-#endif
-
-#ifdef DECODE_GIF_ON_SAVING
-                if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash, trying to free non existant
-                 pointer */
-                {
-                    if(framingCtx->aFramingCtx != M4OSA_NULL)
-                    {
-                        {
-                            if(framingCtx->aFramingCtx->FramingRgb != M4OSA_NULL)
-                            {
-                                free(framingCtx->aFramingCtx->\
-                                    FramingRgb->pac_data);
-                                framingCtx->aFramingCtx->FramingRgb->pac_data = M4OSA_NULL;
-                                free(framingCtx->aFramingCtx->FramingRgb);
-                                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
-                            }
-                        }
-                        if(framingCtx->aFramingCtx->FramingYuv != M4OSA_NULL)
-                        {
-                            free(framingCtx->aFramingCtx->\
-                                FramingYuv[0].pac_data);
-                            framingCtx->aFramingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
-                           free(framingCtx->aFramingCtx->\
-                                FramingYuv[1].pac_data);
-                            framingCtx->aFramingCtx->FramingYuv[1].pac_data = M4OSA_NULL;
-                           free(framingCtx->aFramingCtx->\
-                                FramingYuv[2].pac_data);
-                            framingCtx->aFramingCtx->FramingYuv[2].pac_data = M4OSA_NULL;
-                            free(framingCtx->aFramingCtx->FramingYuv);
-                            framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
-                        }
-                        free(framingCtx->aFramingCtx);
-                        framingCtx->aFramingCtx = M4OSA_NULL;
-                    }
-                    if(framingCtx->aFramingCtx_last != M4OSA_NULL)
-                    {
-                        if(framingCtx->aFramingCtx_last->FramingRgb != M4OSA_NULL)
-                        {
-                            free(framingCtx->aFramingCtx_last->\
-                                FramingRgb->pac_data);
-                            framingCtx->aFramingCtx_last->FramingRgb->pac_data = M4OSA_NULL;
-                            free(framingCtx->aFramingCtx_last->\
-                                FramingRgb);
-                            framingCtx->aFramingCtx_last->FramingRgb = M4OSA_NULL;
-                        }
-                        if(framingCtx->aFramingCtx_last->FramingYuv != M4OSA_NULL)
-                        {
-                            free(framingCtx->aFramingCtx_last->\
-                                FramingYuv[0].pac_data);
-                            framingCtx->aFramingCtx_last->FramingYuv[0].pac_data = M4OSA_NULL;
-                            free(framingCtx->aFramingCtx_last->FramingYuv);
-                            framingCtx->aFramingCtx_last->FramingYuv = M4OSA_NULL;
-                        }
-                        free(framingCtx->aFramingCtx_last);
-                        framingCtx->aFramingCtx_last = M4OSA_NULL;
-                    }
-                    if(framingCtx->pEffectFilePath != M4OSA_NULL)
-                    {
-                        free(framingCtx->pEffectFilePath);
-                        framingCtx->pEffectFilePath = M4OSA_NULL;
-                    }
-                    /*In case there are still allocated*/
-                    if(framingCtx->pSPSContext != M4OSA_NULL)
-                    {
-                    //    M4SPS_destroy(framingCtx->pSPSContext);
-                        framingCtx->pSPSContext = M4OSA_NULL;
-                    }
-                    /*Alpha blending structure*/
-                    if(framingCtx->alphaBlendingStruct  != M4OSA_NULL)
-                    {
-                        free(framingCtx->alphaBlendingStruct);
-                        framingCtx->alphaBlendingStruct = M4OSA_NULL;
-                    }
-
-                    free(framingCtx);
-                    framingCtx = M4OSA_NULL;
-                }
-#else
-                do
-                {
-                    if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash, trying to free non
-                    existant pointer */
-                    {
-                        if(framingCtx->FramingRgb != M4OSA_NULL)
-                        {
-                            free(framingCtx->FramingRgb->pac_data);
-                            framingCtx->FramingRgb->pac_data = M4OSA_NULL;
-                            free(framingCtx->FramingRgb);
-                            framingCtx->FramingRgb = M4OSA_NULL;
-                        }
-                        if(framingCtx->FramingYuv != M4OSA_NULL)
-                        {
-                            free(framingCtx->FramingYuv[0].pac_data);
-                            framingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
-                            free(framingCtx->FramingYuv);
-                            framingCtx->FramingYuv = M4OSA_NULL;
-                        }
-                        framingCtx_save = framingCtx->pNext;
-                        free(framingCtx);
-                        framingCtx = M4OSA_NULL;
-                        framingCtx = framingCtx_save;
-                    }
-                    else
-                    {
-                        /*FB: bug fix P4ME00003002*/
-                        break;
-                    }
-                } while(framingCtx_first != framingCtx);
-#endif
-            }
-            else if( M4xVSS_kVideoEffectType_Fifties == pSettings->Effects[i].VideoEffectType)
-            {
-                /* Free Fifties context */
-                M4xVSS_FiftiesStruct* FiftiesCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
-
-                if(FiftiesCtx != M4OSA_NULL)
-                {
-                    free(FiftiesCtx);
-                    FiftiesCtx = M4OSA_NULL;
-                }
-
-            }
-            else if( M4xVSS_kVideoEffectType_ColorRGB16 == pSettings->Effects[i].VideoEffectType
-                || M4xVSS_kVideoEffectType_BlackAndWhite == pSettings->Effects[i].VideoEffectType
-                || M4xVSS_kVideoEffectType_Pink == pSettings->Effects[i].VideoEffectType
-                || M4xVSS_kVideoEffectType_Green == pSettings->Effects[i].VideoEffectType
-                || M4xVSS_kVideoEffectType_Sepia == pSettings->Effects[i].VideoEffectType
-                || M4xVSS_kVideoEffectType_Negative== pSettings->Effects[i].VideoEffectType
-                || M4xVSS_kVideoEffectType_Gradient== pSettings->Effects[i].VideoEffectType)
-            {
-                /* Free Color context */
-                M4xVSS_ColorStruct* ColorCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
-
-                if(ColorCtx != M4OSA_NULL)
-                {
-                    free(ColorCtx);
-                    ColorCtx = M4OSA_NULL;
-                }
-            }
-
-            /* Free simple fields */
-            if(pSettings->Effects[i].xVSS.pFramingFilePath != M4OSA_NULL)
-            {
-                free(pSettings->Effects[i].xVSS.pFramingFilePath);
-                pSettings->Effects[i].xVSS.pFramingFilePath = M4OSA_NULL;
-            }
-            if(pSettings->Effects[i].xVSS.pFramingBuffer != M4OSA_NULL)
-            {
-                free(pSettings->Effects[i].xVSS.pFramingBuffer);
-                pSettings->Effects[i].xVSS.pFramingBuffer = M4OSA_NULL;
-            }
-            if(pSettings->Effects[i].xVSS.pTextBuffer != M4OSA_NULL)
-            {
-                free(pSettings->Effects[i].xVSS.pTextBuffer);
-                pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
-            }
-        }
-        free(pSettings->Effects);
-        pSettings->Effects = M4OSA_NULL;
-    }
-
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-//    M4OSA_UInt8 i,j;
-
-    /* Free "local" BGM settings */
-    if(xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL)
-    {
-        if(xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
-        {
-            free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
-            xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
-        }
-        free(xVSS_context->pSettings->xVSS.pBGMtrack);
-        xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
-    }
-
-    M4xVSS_freeSettings(xVSS_context->pSettings);
-
-    if(xVSS_context->pPTo3GPPparamsList != M4OSA_NULL)
-    {
-        M4xVSS_Pto3GPP_params* pParams = xVSS_context->pPTo3GPPparamsList;
-        M4xVSS_Pto3GPP_params* pParams_sauv;
-
-        while(pParams != M4OSA_NULL)
-        {
-            if(pParams->pFileIn != M4OSA_NULL)
-            {
-                free(pParams->pFileIn);
-                pParams->pFileIn = M4OSA_NULL;
-            }
-            if(pParams->pFileOut != M4OSA_NULL)
-            {
-                /* Delete temporary file */
-                remove((const char *)pParams->pFileOut);
-                free(pParams->pFileOut);
-                pParams->pFileOut = M4OSA_NULL;
-            }
-            if(pParams->pFileTemp != M4OSA_NULL)
-            {
-                /* Delete temporary file */
-#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-                remove((const char *)pParams->pFileTemp);
-                free(pParams->pFileTemp);
-#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-                pParams->pFileTemp = M4OSA_NULL;
-            }
-            pParams_sauv = pParams;
-            pParams = pParams->pNext;
-            free(pParams_sauv);
-            pParams_sauv = M4OSA_NULL;
-        }
-    }
-
-    if(xVSS_context->pMCSparamsList != M4OSA_NULL)
-    {
-        M4xVSS_MCS_params* pParams = xVSS_context->pMCSparamsList;
-        M4xVSS_MCS_params* pParams_sauv;
-
-        while(pParams != M4OSA_NULL)
-        {
-            if(pParams->pFileIn != M4OSA_NULL)
-            {
-                free(pParams->pFileIn);
-                pParams->pFileIn = M4OSA_NULL;
-            }
-            if(pParams->pFileOut != M4OSA_NULL)
-            {
-                /* Delete temporary file */
-                remove((const char *)pParams->pFileOut);
-                free(pParams->pFileOut);
-                pParams->pFileOut = M4OSA_NULL;
-            }
-            if(pParams->pFileTemp != M4OSA_NULL)
-            {
-                /* Delete temporary file */
-#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
-                remove((const char *)pParams->pFileTemp);
-                free(pParams->pFileTemp);
-#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
-                pParams->pFileTemp = M4OSA_NULL;
-            }
-            pParams_sauv = pParams;
-            pParams = pParams->pNext;
-            free(pParams_sauv);
-            pParams_sauv = M4OSA_NULL;
-        }
-    }
-
-    if(xVSS_context->pcmPreviewFile != M4OSA_NULL)
-    {
-        free(xVSS_context->pcmPreviewFile);
-        xVSS_context->pcmPreviewFile = M4OSA_NULL;
-    }
-    if(xVSS_context->pSettings->pOutputFile != M4OSA_NULL
-        && xVSS_context->pOutputFile != M4OSA_NULL)
-    {
-        free(xVSS_context->pSettings->pOutputFile);
-        xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
-        xVSS_context->pOutputFile = M4OSA_NULL;
-    }
-
-    /* Reinit all context variables */
-    xVSS_context->previousClipNumber = 0;
-    xVSS_context->editingStep = M4xVSS_kMicroStateEditing;
-    xVSS_context->analyseStep = M4xVSS_kMicroStateAnalysePto3GPP;
-    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
-    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
-    xVSS_context->pMCSparamsList = M4OSA_NULL;
-    xVSS_context->pMCScurrentParams = M4OSA_NULL;
-    xVSS_context->tempFileIndex = 0;
-    xVSS_context->targetedTimescale = 0;
-
-    return M4NO_ERROR;
-}
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext,
- *                                    M4OSA_Char* pFile,
- *                                    M4VIDEOEDITING_ClipProperties *pFileProperties)
- *
- * @brief    This function retrieve properties of an input 3GP file using MCS
- * @note
- * @param    pContext        (IN) The integrator own context
- * @param    pFile            (IN) 3GP file to analyse
- * @param    pFileProperties    (IN/OUT) Pointer on a structure that will contain
- *                            the 3GP file properties
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
-                                       M4VIDEOEDITING_ClipProperties *pFileProperties)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-    M4MCS_Context mcs_context;
-
-    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_init: 0x%x", err);
-        return err;
-    }
-
-    /*open the MCS in the "normal opening" mode to retrieve the exact duration*/
-    err = M4MCS_open_normalMode(mcs_context, pFile, M4VIDEOEDITING_kFileType_3GPP,
-        M4OSA_NULL, M4OSA_NULL);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_open: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-
-    err = M4MCS_getInputFileProperties(mcs_context, pFileProperties);
-    if(err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("Error in M4MCS_getInputFileProperties: 0x%x", err);
-        M4MCS_abort(mcs_context);
-        return err;
-    }
-
-    err = M4MCS_abort(mcs_context);
-    if (err != M4NO_ERROR)
-    {
-        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_abort: 0x%x", err);
-        return err;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
- *                                                M4OSA_UInt32* pTargetedTimeScale)
- *
- * @brief    This function retrieve targeted time scale
- * @note
- * @param    pContext            (IN)    The integrator own context
- * @param    pTargetedTimeScale    (OUT)    Targeted time scale
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
-                                                 M4VSS3GPP_EditSettings* pSettings,
-                                                  M4OSA_UInt32* pTargetedTimeScale)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-    M4OSA_UInt32 totalDuration = 0;
-    M4OSA_UInt8 i = 0;
-    M4OSA_UInt32 tempTimeScale = 0, tempDuration = 0;
-
-    for(i=0;i<pSettings->uiClipNumber;i++)
-    {
-        /*search timescale only in mpeg4 case*/
-        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP
-            || pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_MP4
-            || pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_M4V)
-        {
-            M4VIDEOEDITING_ClipProperties fileProperties;
-
-            /*UTF conversion support*/
-            M4OSA_Char* pDecodedPath = M4OSA_NULL;
-
-            /**
-            * UTF conversion: convert into the customer format, before being used*/
-            pDecodedPath = pSettings->pClipList[i]->pFile;
-
-            if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
-                && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
-            {
-                M4OSA_UInt32 length = 0;
-                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
-                     (M4OSA_Void*) pSettings->pClipList[i]->pFile,
-                        (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
-                             &length);
-                if(err != M4NO_ERROR)
-                {
-                    M4OSA_TRACE1_1("M4xVSS_Init:\
-                         M4xVSS_internalConvertToUTF8 returns err: 0x%x",err);
-                    return err;
-                }
-                pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-            }
-
-            /*End of the conversion: use the decoded path*/
-            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath, &fileProperties);
-
-            /*get input file properties*/
-            /*err = M4xVSS_internalGetProperties(xVSS_context, pSettings->\
-                pClipList[i]->pFile, &fileProperties);*/
-            if(M4NO_ERROR != err)
-            {
-                M4OSA_TRACE1_1("M4xVSS_internalGetTargetedTimeScale:\
-                     M4xVSS_internalGetProperties returned: 0x%x", err);
-                return err;
-            }
-            if(fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
-            {
-                if(pSettings->pClipList[i]->uiEndCutTime > 0)
-                {
-                    if(tempDuration < (pSettings->pClipList[i]->uiEndCutTime \
-                        - pSettings->pClipList[i]->uiBeginCutTime))
-                    {
-                        tempTimeScale = fileProperties.uiVideoTimeScale;
-                        tempDuration = (pSettings->pClipList[i]->uiEndCutTime\
-                             - pSettings->pClipList[i]->uiBeginCutTime);
-                    }
-                }
-                else
-                {
-                    if(tempDuration < (fileProperties.uiClipDuration\
-                         - pSettings->pClipList[i]->uiBeginCutTime))
-                    {
-                        tempTimeScale = fileProperties.uiVideoTimeScale;
-                        tempDuration = (fileProperties.uiClipDuration\
-                             - pSettings->pClipList[i]->uiBeginCutTime);
-                    }
-                }
-            }
-        }
-        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_ARGB8888)
-        {
-            /*the timescale is 30 for PTO3GP*/
-            *pTargetedTimeScale = 30;
-            return M4NO_ERROR;
-
-        }
-    }
-
-    if(tempTimeScale >= 30)/*Define a minimum time scale, otherwise if the timescale is not
-    enough, there will be an infinite loop in the shell encoder*/
-    {
-        *pTargetedTimeScale = tempTimeScale;
-    }
-    else
-    {
-        *pTargetedTimeScale = 30;
-    }
-
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function apply a color effect on an input YUV420 planar frame
- * @note
- * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param    PlaneIn            (IN) Input YUV420 planar
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
-                                             M4VIFI_ImagePlane *PlaneIn,
-                                             M4VIFI_ImagePlane *PlaneOut,
-                                             M4VSS3GPP_ExternalProgress *pProgress,
-                                             M4OSA_UInt32 uiEffectKind)
-{
-    M4VIFI_Int32 plane_number;
-    M4VIFI_UInt32 i,j;
-    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
-    M4xVSS_ColorStruct* ColorContext = (M4xVSS_ColorStruct*)pFunctionContext;
-
-    for (plane_number = 0; plane_number < 3; plane_number++)
-    {
-        p_buf_src = &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
-        p_buf_dest = &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
-        for (i = 0; i < PlaneOut[plane_number].u_height; i++)
-        {
-            /**
-             * Chrominance */
-            if(plane_number==1 || plane_number==2)
-            {
-                //switch ((M4OSA_UInt32)pFunctionContext)
-                // commented because a structure for the effects context exist
-                switch (ColorContext->colorEffectType)
-                {
-                    case M4xVSS_kVideoEffectType_BlackAndWhite:
-                        memset((void *)p_buf_dest,128,
-                         PlaneIn[plane_number].u_width);
-                        break;
-                    case M4xVSS_kVideoEffectType_Pink:
-                        memset((void *)p_buf_dest,255,
-                         PlaneIn[plane_number].u_width);
-                        break;
-                    case M4xVSS_kVideoEffectType_Green:
-                        memset((void *)p_buf_dest,0,
-                         PlaneIn[plane_number].u_width);
-                        break;
-                    case M4xVSS_kVideoEffectType_Sepia:
-                        if(plane_number==1)
-                        {
-                            memset((void *)p_buf_dest,117,
-                             PlaneIn[plane_number].u_width);
-                        }
-                        else
-                        {
-                            memset((void *)p_buf_dest,139,
-                             PlaneIn[plane_number].u_width);
-                        }
-                        break;
-                    case M4xVSS_kVideoEffectType_Negative:
-                        memcpy((void *)p_buf_dest,
-                         (void *)p_buf_src ,PlaneOut[plane_number].u_width);
-                        break;
-
-                    case M4xVSS_kVideoEffectType_ColorRGB16:
-                        {
-                            M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
-
-                            /*first get the r, g, b*/
-                            b = (ColorContext->rgb16ColorData &  0x001f);
-                            g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
-                            r = (ColorContext->rgb16ColorData &  0xf800)>>11;
-
-                            /*keep y, but replace u and v*/
-                            if(plane_number==1)
-                            {
-                                /*then convert to u*/
-                                u = U16(r, g, b);
-                                memset((void *)p_buf_dest,(M4OSA_UInt8)u,
-                                 PlaneIn[plane_number].u_width);
-                            }
-                            if(plane_number==2)
-                            {
-                                /*then convert to v*/
-                                v = V16(r, g, b);
-                                memset((void *)p_buf_dest, (M4OSA_UInt8)v,
-                                 PlaneIn[plane_number].u_width);
-                            }
-                        }
-                        break;
-                    case M4xVSS_kVideoEffectType_Gradient:
-                        {
-                            M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
-
-                            /*first get the r, g, b*/
-                            b = (ColorContext->rgb16ColorData &  0x001f);
-                            g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
-                            r = (ColorContext->rgb16ColorData &  0xf800)>>11;
-
-                            /*for color gradation*/
-                            b = (M4OSA_UInt16)( b - ((b*i)/PlaneIn[plane_number].u_height));
-                            g = (M4OSA_UInt16)(g - ((g*i)/PlaneIn[plane_number].u_height));
-                            r = (M4OSA_UInt16)(r - ((r*i)/PlaneIn[plane_number].u_height));
-
-                            /*keep y, but replace u and v*/
-                            if(plane_number==1)
-                            {
-                                /*then convert to u*/
-                                u = U16(r, g, b);
-                                memset((void *)p_buf_dest,(M4OSA_UInt8)u,
-                                 PlaneIn[plane_number].u_width);
-                            }
-                            if(plane_number==2)
-                            {
-                                /*then convert to v*/
-                                v = V16(r, g, b);
-                                memset((void *)p_buf_dest,(M4OSA_UInt8)v,
-                                 PlaneIn[plane_number].u_width);
-                            }
-                        }
-                        break;
-                        default:
-                        break;
-                }
-            }
-            /**
-             * Luminance */
-            else
-            {
-                //switch ((M4OSA_UInt32)pFunctionContext)
-                // commented because a structure for the effects context exist
-                switch (ColorContext->colorEffectType)
-                {
-                case M4xVSS_kVideoEffectType_Negative:
-                    for(j=0;j<PlaneOut[plane_number].u_width;j++)
-                    {
-                            p_buf_dest[j] = 255 - p_buf_src[j];
-                    }
-                    break;
-                default:
-                    memcpy((void *)p_buf_dest,
-                     (void *)p_buf_src ,PlaneOut[plane_number].u_width);
-                    break;
-                }
-            }
-            p_buf_src += PlaneIn[plane_number].u_stride;
-            p_buf_dest += PlaneOut[plane_number].u_stride;
-        }
-    }
-
-    return M4VIFI_OK;
-}
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function add a fixed or animated image on an input YUV420 planar frame
- * @note
- * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param    PlaneIn            (IN) Input YUV420 planar
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming( M4OSA_Void *userData,
-                                                M4VIFI_ImagePlane PlaneIn[3],
-                                                M4VIFI_ImagePlane *PlaneOut,
-                                                M4VSS3GPP_ExternalProgress *pProgress,
-                                                M4OSA_UInt32 uiEffectKind )
-{
-    M4VIFI_UInt32 x,y;
-
-    M4VIFI_UInt8 *p_in_Y = PlaneIn[0].pac_data;
-    M4VIFI_UInt8 *p_in_U = PlaneIn[1].pac_data;
-    M4VIFI_UInt8 *p_in_V = PlaneIn[2].pac_data;
-
-    M4xVSS_FramingStruct* Framing = M4OSA_NULL;
-    M4xVSS_FramingStruct* currentFraming = M4OSA_NULL;
-    M4VIFI_UInt8 *FramingRGB = M4OSA_NULL;
-
-    M4VIFI_UInt8 *p_out0;
-    M4VIFI_UInt8 *p_out1;
-    M4VIFI_UInt8 *p_out2;
-
-    M4VIFI_UInt32 topleft[2];
-
-    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
-    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
-
-#ifndef DECODE_GIF_ON_SAVING
-    Framing = (M4xVSS_FramingStruct *)userData;
-    currentFraming = (M4xVSS_FramingStruct *)Framing->pCurrent;
-    FramingRGB = Framing->FramingRgb->pac_data;
-#endif /*DECODE_GIF_ON_SAVING*/
-
-    /*FB*/
-#ifdef DECODE_GIF_ON_SAVING
-    M4OSA_ERR err;
-    Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
-    currentFraming = (M4xVSS_FramingStruct *)Framing;
-    FramingRGB = Framing->FramingRgb->pac_data;
-#endif /*DECODE_GIF_ON_SAVING*/
-    /*end FB*/
-
-    /**
-     * Initialize input / output plane pointers */
-    p_in_Y += PlaneIn[0].u_topleft;
-    p_in_U += PlaneIn[1].u_topleft;
-    p_in_V += PlaneIn[2].u_topleft;
-
-    p_out0 = PlaneOut[0].pac_data;
-    p_out1 = PlaneOut[1].pac_data;
-    p_out2 = PlaneOut[2].pac_data;
-
-    /**
-     * Depending on time, initialize Framing frame to use */
-    if(Framing->previousClipTime == -1)
-    {
-        Framing->previousClipTime = pProgress->uiOutputTime;
-    }
-
-    /**
-     * If the current clip time has reach the duration of one frame of the framing picture
-     * we need to step to next framing picture */
-
-    Framing->previousClipTime = pProgress->uiOutputTime;
-    FramingRGB = currentFraming->FramingRgb->pac_data;
-    topleft[0] = currentFraming->topleft_x;
-    topleft[1] = currentFraming->topleft_y;
-
-    for( x=0 ;x < PlaneIn[0].u_height ; x++)
-    {
-        for( y=0 ;y < PlaneIn[0].u_width ; y++)
-        {
-            /**
-             * To handle framing with input size != output size
-             * Framing is applyed if coordinates matches between framing/topleft and input plane */
-            if( y < (topleft[0] + currentFraming->FramingYuv[0].u_width)  &&
-                y >= topleft[0] &&
-                x < (topleft[1] + currentFraming->FramingYuv[0].u_height) &&
-                x >= topleft[1])
-            {
-                /*Alpha blending support*/
-                M4OSA_Float alphaBlending = 1;
-                M4xVSS_internalEffectsAlphaBlending*  alphaBlendingStruct =\
-                 (M4xVSS_internalEffectsAlphaBlending*)\
-                    ((M4xVSS_FramingContext*)userData)->alphaBlendingStruct;
-
-                if(alphaBlendingStruct != M4OSA_NULL)
-                {
-                    if(pProgress->uiProgress \
-                    < (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10))
-                    {
-                        if(alphaBlendingStruct->m_fadeInTime == 0) {
-                            alphaBlending = alphaBlendingStruct->m_start / 100;
-                        } else {
-                            alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle\
-                             - alphaBlendingStruct->m_start)\
-                                *pProgress->uiProgress/(alphaBlendingStruct->m_fadeInTime*10));
-                            alphaBlending += alphaBlendingStruct->m_start;
-                            alphaBlending /= 100;
-                        }
-                    }
-                    else if(pProgress->uiProgress >= (M4OSA_UInt32)(alphaBlendingStruct->\
-                    m_fadeInTime*10) && pProgress->uiProgress < 1000\
-                     - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
-                    {
-                        alphaBlending = (M4OSA_Float)\
-                        ((M4OSA_Float)alphaBlendingStruct->m_middle/100);
-                    }
-                    else if(pProgress->uiProgress >= 1000 - (M4OSA_UInt32)\
-                    (alphaBlendingStruct->m_fadeOutTime*10))
-                    {
-                        if(alphaBlendingStruct->m_fadeOutTime == 0) {
-                            alphaBlending = alphaBlendingStruct->m_end / 100;
-                        } else {
-                            alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle \
-                            - alphaBlendingStruct->m_end))*(1000 - pProgress->uiProgress)\
-                            /(alphaBlendingStruct->m_fadeOutTime*10);
-                            alphaBlending += alphaBlendingStruct->m_end;
-                            alphaBlending /= 100;
-                        }
-                    }
-                }
-                /**/
-
-                if((*(FramingRGB)==transparent1) && (*(FramingRGB+1)==transparent2))
-                {
-                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(p_in_Y+y+x*PlaneIn[0].u_stride));
-                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
-                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride));
-                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
-                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride));
-                }
-                else
-                {
-                    *( p_out0+y+x*PlaneOut[0].u_stride)=
-                        (*(currentFraming->FramingYuv[0].pac_data+(y-topleft[0])\
-                            +(x-topleft[1])*currentFraming->FramingYuv[0].u_stride))*alphaBlending;
-                    *( p_out0+y+x*PlaneOut[0].u_stride)+=
-                        (*(p_in_Y+y+x*PlaneIn[0].u_stride))*(1-alphaBlending);
-                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
-                        (*(currentFraming->FramingYuv[1].pac_data+((y-topleft[0])>>1)\
-                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[1].u_stride))\
-                                *alphaBlending;
-                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)+=
-                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride))*(1-alphaBlending);
-                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
-                        (*(currentFraming->FramingYuv[2].pac_data+((y-topleft[0])>>1)\
-                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[2].u_stride))\
-                                *alphaBlending;
-                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)+=
-                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride))*(1-alphaBlending);
-                }
-                if( PlaneIn[0].u_width < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
-                    y == PlaneIn[0].u_width-1)
-                {
-                    FramingRGB = FramingRGB + 2 \
-                        * (topleft[0] + currentFraming->FramingYuv[0].u_width \
-                            - PlaneIn[0].u_width + 1);
-                }
-                else
-                {
-                    FramingRGB = FramingRGB + 2;
-                }
-            }
-            /**
-             * Just copy input plane to output plane */
-            else
-            {
-                *( p_out0+y+x*PlaneOut[0].u_stride)=*(p_in_Y+y+x*PlaneIn[0].u_stride);
-                *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
-                    *(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride);
-                *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
-                    *(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride);
-            }
-        }
-    }
-
-
-    return M4VIFI_OK;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function make a video look as if it was taken in the fifties
- * @note
- * @param    pUserData       (IN) Context
- * @param    pPlaneIn        (IN) Input YUV420 planar
- * @param    pPlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:            No error
- * @return  M4ERR_PARAMETER:    pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
- ******************************************************************************
- */
-M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties( M4OSA_Void *pUserData,
-                                                M4VIFI_ImagePlane *pPlaneIn,
-                                                M4VIFI_ImagePlane *pPlaneOut,
-                                                M4VSS3GPP_ExternalProgress *pProgress,
-                                                M4OSA_UInt32 uiEffectKind )
-{
-    M4VIFI_UInt32 x, y, xShift;
-    M4VIFI_UInt8 *pInY = pPlaneIn[0].pac_data;
-    M4VIFI_UInt8 *pOutY, *pInYbegin;
-    M4VIFI_UInt8 *pInCr,* pOutCr;
-    M4VIFI_Int32 plane_number;
-
-    /* Internal context*/
-    M4xVSS_FiftiesStruct* p_FiftiesData = (M4xVSS_FiftiesStruct *)pUserData;
-
-    /* Check the inputs (debug only) */
-    M4OSA_DEBUG_IF2((p_FiftiesData == M4OSA_NULL),M4ERR_PARAMETER,
-         "xVSS: p_FiftiesData is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
-    M4OSA_DEBUG_IF2((pPlaneOut == M4OSA_NULL),M4ERR_PARAMETER,
-         "xVSS: p_PlaneOut is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
-    M4OSA_DEBUG_IF2((pProgress == M4OSA_NULL),M4ERR_PARAMETER,
-        "xVSS: p_Progress is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
-
-    /* Initialize input / output plane pointers */
-    pInY += pPlaneIn[0].u_topleft;
-    pOutY = pPlaneOut[0].pac_data;
-    pInYbegin  = pInY;
-
-    /* Initialize the random */
-    if(p_FiftiesData->previousClipTime < 0)
-    {
-        M4OSA_randInit();
-        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
-        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
-        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
-    }
-
-    /* Choose random values if we have reached the duration of a partial effect */
-    else if( (pProgress->uiOutputTime - p_FiftiesData->previousClipTime)\
-         > p_FiftiesData->fiftiesEffectDuration)
-    {
-        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
-        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
-        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
-    }
-
-    /* Put in Sepia the chrominance */
-    for (plane_number = 1; plane_number < 3; plane_number++)
-    {
-        pInCr  = pPlaneIn[plane_number].pac_data  + pPlaneIn[plane_number].u_topleft;
-        pOutCr = pPlaneOut[plane_number].pac_data + pPlaneOut[plane_number].u_topleft;
-
-        for (x = 0; x < pPlaneOut[plane_number].u_height; x++)
-        {
-            if (1 == plane_number)
-                memset((void *)pOutCr, 117,pPlaneIn[plane_number].u_width); /* U value */
-            else
-                memset((void *)pOutCr, 139,pPlaneIn[plane_number].u_width); /* V value */
-
-            pInCr  += pPlaneIn[plane_number].u_stride;
-            pOutCr += pPlaneOut[plane_number].u_stride;
-        }
-    }
-
-    /* Compute the new pixels values */
-    for( x = 0 ; x < pPlaneIn[0].u_height ; x++)
-    {
-        M4VIFI_UInt8 *p_outYtmp, *p_inYtmp;
-
-        /* Compute the xShift (random value) */
-        if (0 == (p_FiftiesData->shiftRandomValue % 5 ))
-            xShift = (x + p_FiftiesData->shiftRandomValue ) % (pPlaneIn[0].u_height - 1);
-        else
-            xShift = (x + (pPlaneIn[0].u_height - p_FiftiesData->shiftRandomValue) ) \
-                % (pPlaneIn[0].u_height - 1);
-
-        /* Initialize the pointers */
-        p_outYtmp = pOutY + 1;                                    /* yShift of 1 pixel */
-        p_inYtmp  = pInYbegin + (xShift * pPlaneIn[0].u_stride);  /* Apply the xShift */
-
-        for( y = 0 ; y < pPlaneIn[0].u_width ; y++)
-        {
-            /* Set Y value */
-            if (xShift > (pPlaneIn[0].u_height - 4))
-                *p_outYtmp = 40;        /* Add some horizontal black lines between the
-                                        two parts of the image */
-            else if ( y == p_FiftiesData->stripeRandomValue)
-                *p_outYtmp = 90;        /* Add a random vertical line for the bulk */
-            else
-                *p_outYtmp = *p_inYtmp;
-
-
-            /* Go to the next pixel */
-            p_outYtmp++;
-            p_inYtmp++;
-
-            /* Restart at the beginning of the line for the last pixel*/
-            if (y == (pPlaneIn[0].u_width - 2))
-                p_outYtmp = pOutY;
-        }
-
-        /* Go to the next line */
-        pOutY += pPlaneOut[0].u_stride;
-    }
-
-    return M4VIFI_OK;
-}
-
-/**
- ******************************************************************************
- * M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom( )
- * @brief    Zoom in/out video effect functions.
- * @note    The external video function is used only if VideoEffectType is set to
- * M4VSS3GPP_kVideoEffectType_ZoomIn or M4VSS3GPP_kVideoEffectType_ZoomOut.
- *
- * @param   pFunctionContext    (IN) The function context, previously set by the integrator
- * @param    pInputPlanes        (IN) Input YUV420 image: pointer to an array of three valid
- *                                    image planes (Y, U and V)
- * @param    pOutputPlanes        (IN/OUT) Output (filtered) YUV420 image: pointer to an array of
- *                                        three valid image planes (Y, U and V)
- * @param    pProgress            (IN) Set of information about the video transition progress.
- * @return    M4NO_ERROR:            No error
- * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
- ******************************************************************************
- */
-
-M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom(
-    M4OSA_Void *pFunctionContext,
-    M4VIFI_ImagePlane *pInputPlanes,
-    M4VIFI_ImagePlane *pOutputPlanes,
-    M4VSS3GPP_ExternalProgress *pProgress,
-    M4OSA_UInt32 uiEffectKind
-)
-{
-    M4OSA_UInt32 boxWidth;
-    M4OSA_UInt32 boxHeight;
-    M4OSA_UInt32 boxPosX;
-    M4OSA_UInt32 boxPosY;
-    M4OSA_UInt32 ratio = 0;
-    /*  * 1.189207 between ratio */
-    /* zoom between x1 and x16 */
-    M4OSA_UInt32 ratiotab[17] ={1024,1218,1448,1722,2048,2435,2896,3444,4096,4871,5793,\
-                                6889,8192,9742,11585,13777,16384};
-    M4OSA_UInt32 ik;
-
-    M4VIFI_ImagePlane boxPlane[3];
-
-    if((M4OSA_Void *)M4xVSS_kVideoEffectType_ZoomOut == pFunctionContext)
-    {
-        //ratio = 16 - (15 * pProgress->uiProgress)/1000;
-        ratio = 16 - pProgress->uiProgress / 66 ;
-    }
-    else if((M4OSA_Void *)M4xVSS_kVideoEffectType_ZoomIn == pFunctionContext)
-    {
-        //ratio = 1 + (15 * pProgress->uiProgress)/1000;
-        ratio = 1 + pProgress->uiProgress / 66 ;
-    }
-
-    for(ik=0;ik<3;ik++){
-
-        boxPlane[ik].u_stride = pInputPlanes[ik].u_stride;
-        boxPlane[ik].pac_data = pInputPlanes[ik].pac_data;
-
-        boxHeight = ( pInputPlanes[ik].u_height << 10 ) / ratiotab[ratio];
-        boxWidth = ( pInputPlanes[ik].u_width << 10 ) / ratiotab[ratio];
-        boxPlane[ik].u_height = (boxHeight)&(~1);
-        boxPlane[ik].u_width = (boxWidth)&(~1);
-
-        boxPosY = (pInputPlanes[ik].u_height >> 1) - (boxPlane[ik].u_height >> 1);
-        boxPosX = (pInputPlanes[ik].u_width >> 1) - (boxPlane[ik].u_width >> 1);
-        boxPlane[ik].u_topleft = boxPosY * boxPlane[ik].u_stride + boxPosX;
-    }
-
-    M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL, (M4VIFI_ImagePlane*)&boxPlane, pOutputPlanes);
-
-    /**
-     * Return */
-    return(M4NO_ERROR);
-}
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_AlphaMagic( M4OSA_Void *userData,
- *                                    M4VIFI_ImagePlane PlaneIn1[3],
- *                                    M4VIFI_ImagePlane PlaneIn2[3],
- *                                    M4VIFI_ImagePlane *PlaneOut,
- *                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                    M4OSA_UInt32 uiTransitionKind)
- *
- * @brief    This function apply a color effect on an input YUV420 planar frame
- * @note
- * @param    userData        (IN) Contains a pointer on a settings structure
- * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
- * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiTransitionKind(IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                             M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
-                             M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiTransitionKind)
-{
-
-    M4OSA_ERR err;
-
-    M4xVSS_internal_AlphaMagicSettings* alphaContext;
-    M4VIFI_Int32 alphaProgressLevel;
-
-    M4VIFI_ImagePlane* planeswap;
-    M4VIFI_UInt32 x,y;
-
-    M4VIFI_UInt8 *p_out0;
-    M4VIFI_UInt8 *p_out1;
-    M4VIFI_UInt8 *p_out2;
-    M4VIFI_UInt8 *alphaMask;
-    /* "Old image" */
-    M4VIFI_UInt8 *p_in1_Y;
-    M4VIFI_UInt8 *p_in1_U;
-    M4VIFI_UInt8 *p_in1_V;
-    /* "New image" */
-    M4VIFI_UInt8 *p_in2_Y;
-    M4VIFI_UInt8 *p_in2_U;
-    M4VIFI_UInt8 *p_in2_V;
-
-    err = M4NO_ERROR;
-
-    alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
-
-    alphaProgressLevel = (pProgress->uiProgress * 128)/1000;
-
-    if( alphaContext->isreverse != M4OSA_FALSE)
-    {
-        alphaProgressLevel = 128 - alphaProgressLevel;
-        planeswap = PlaneIn1;
-        PlaneIn1 = PlaneIn2;
-        PlaneIn2 = planeswap;
-    }
-
-    p_out0 = PlaneOut[0].pac_data;
-    p_out1 = PlaneOut[1].pac_data;
-    p_out2 = PlaneOut[2].pac_data;
-
-    alphaMask = alphaContext->pPlane->pac_data;
-
-    /* "Old image" */
-    p_in1_Y = PlaneIn1[0].pac_data;
-    p_in1_U = PlaneIn1[1].pac_data;
-    p_in1_V = PlaneIn1[2].pac_data;
-    /* "New image" */
-    p_in2_Y = PlaneIn2[0].pac_data;
-    p_in2_U = PlaneIn2[1].pac_data;
-    p_in2_V = PlaneIn2[2].pac_data;
-
-     /**
-     * For each column ... */
-    for( y=0; y<PlaneOut->u_height; y++ )
-    {
-        /**
-         * ... and each row of the alpha mask */
-        for( x=0; x<PlaneOut->u_width; x++ )
-        {
-            /**
-             * If the value of the current pixel of the alpha mask is > to the current time
-             * ( current time is normalized on [0-255] ) */
-            if( alphaProgressLevel < alphaMask[x+y*PlaneOut->u_width] )
-            {
-                /* We keep "old image" in output plane */
-                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
-                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
-                    *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
-                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
-                    *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
-            }
-            else
-            {
-                /* We take "new image" in output plane */
-                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
-                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
-                    *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
-                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
-                    *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
-            }
-        }
-    }
-
-    return(err);
-}
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_AlphaMagicBlending( M4OSA_Void *userData,
- *                                    M4VIFI_ImagePlane PlaneIn1[3],
- *                                    M4VIFI_ImagePlane PlaneIn2[3],
- *                                    M4VIFI_ImagePlane *PlaneOut,
- *                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                    M4OSA_UInt32 uiTransitionKind)
- *
- * @brief    This function apply a color effect on an input YUV420 planar frame
- * @note
- * @param    userData        (IN) Contains a pointer on a settings structure
- * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
- * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiTransitionKind(IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                                     M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
-                                     M4VSS3GPP_ExternalProgress *pProgress,
-                                     M4OSA_UInt32 uiTransitionKind)
-{
-    M4OSA_ERR err;
-
-    M4xVSS_internal_AlphaMagicSettings* alphaContext;
-    M4VIFI_Int32 alphaProgressLevel;
-    M4VIFI_Int32 alphaBlendLevelMin;
-    M4VIFI_Int32 alphaBlendLevelMax;
-    M4VIFI_Int32 alphaBlendRange;
-
-    M4VIFI_ImagePlane* planeswap;
-    M4VIFI_UInt32 x,y;
-    M4VIFI_Int32 alphaMaskValue;
-
-    M4VIFI_UInt8 *p_out0;
-    M4VIFI_UInt8 *p_out1;
-    M4VIFI_UInt8 *p_out2;
-    M4VIFI_UInt8 *alphaMask;
-    /* "Old image" */
-    M4VIFI_UInt8 *p_in1_Y;
-    M4VIFI_UInt8 *p_in1_U;
-    M4VIFI_UInt8 *p_in1_V;
-    /* "New image" */
-    M4VIFI_UInt8 *p_in2_Y;
-    M4VIFI_UInt8 *p_in2_U;
-    M4VIFI_UInt8 *p_in2_V;
-
-
-    err = M4NO_ERROR;
-
-    alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
-
-    alphaProgressLevel = (pProgress->uiProgress * 128)/1000;
-
-    if( alphaContext->isreverse != M4OSA_FALSE)
-    {
-        alphaProgressLevel = 128 - alphaProgressLevel;
-        planeswap = PlaneIn1;
-        PlaneIn1 = PlaneIn2;
-        PlaneIn2 = planeswap;
-    }
-
-    alphaBlendLevelMin = alphaProgressLevel-alphaContext->blendingthreshold;
-
-    alphaBlendLevelMax = alphaProgressLevel+alphaContext->blendingthreshold;
-
-    alphaBlendRange = (alphaContext->blendingthreshold)*2;
-
-    p_out0 = PlaneOut[0].pac_data;
-    p_out1 = PlaneOut[1].pac_data;
-    p_out2 = PlaneOut[2].pac_data;
-
-    alphaMask = alphaContext->pPlane->pac_data;
-
-    /* "Old image" */
-    p_in1_Y = PlaneIn1[0].pac_data;
-    p_in1_U = PlaneIn1[1].pac_data;
-    p_in1_V = PlaneIn1[2].pac_data;
-    /* "New image" */
-    p_in2_Y = PlaneIn2[0].pac_data;
-    p_in2_U = PlaneIn2[1].pac_data;
-    p_in2_V = PlaneIn2[2].pac_data;
-
-    /* apply Alpha Magic on each pixel */
-       for( y=0; y<PlaneOut->u_height; y++ )
-    {
-        for( x=0; x<PlaneOut->u_width; x++ )
-        {
-            alphaMaskValue = alphaMask[x+y*PlaneOut->u_width];
-            if( alphaBlendLevelMax < alphaMaskValue )
-            {
-                /* We keep "old image" in output plane */
-                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
-                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
-                    *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
-                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
-                    *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
-            }
-            else if( (alphaBlendLevelMin < alphaMaskValue)&&
-                    (alphaMaskValue <= alphaBlendLevelMax ) )
-            {
-                /* We blend "old and new image" in output plane */
-                *( p_out0+x+y*PlaneOut[0].u_stride)=(M4VIFI_UInt8)
-                    (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_Y+x+y*PlaneIn1[0].u_stride))
-                        +(alphaBlendLevelMax-alphaMaskValue)\
-                            *( *(p_in2_Y+x+y*PlaneIn2[0].u_stride)) )/alphaBlendRange );
-
-                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=(M4VIFI_UInt8)\
-                    (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_U+(x>>1)+(y>>1)\
-                        *PlaneIn1[1].u_stride))
-                            +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_U+(x>>1)+(y>>1)\
-                                *PlaneIn2[1].u_stride)) )/alphaBlendRange );
-
-                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
-                    (M4VIFI_UInt8)(( (alphaMaskValue-alphaBlendLevelMin)\
-                        *( *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride))
-                                +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_V+(x>>1)+(y>>1)\
-                                    *PlaneIn2[2].u_stride)) )/alphaBlendRange );
-
-            }
-            else
-            {
-                /* We take "new image" in output plane */
-                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
-                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
-                    *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
-                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
-                    *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
-            }
-        }
-    }
-
-    return(err);
-}
-
-#define M4XXX_SampleAddress(plane, x, y)  ( (plane).pac_data + (plane).u_topleft + (y)\
-     * (plane).u_stride + (x) )
-
-static void M4XXX_CopyPlane(M4VIFI_ImagePlane* dest, M4VIFI_ImagePlane* source)
-{
-    M4OSA_UInt32    height, width, sourceStride, destStride, y;
-    M4OSA_MemAddr8    sourceWalk, destWalk;
-
-    /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
-     recomputed from memory. */
-    height = dest->u_height;
-    width = dest->u_width;
-
-    sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*source, 0, 0);
-    sourceStride = source->u_stride;
-
-    destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*dest, 0, 0);
-    destStride = dest->u_stride;
-
-    for (y=0; y<height; y++)
-    {
-        memcpy((void *)destWalk, (void *)sourceWalk, width);
-        destWalk += destStride;
-        sourceWalk += sourceStride;
-    }
-}
-
-static M4OSA_ERR M4xVSS_VerticalSlideTransition(M4VIFI_ImagePlane* topPlane,
-                                                M4VIFI_ImagePlane* bottomPlane,
-                                                M4VIFI_ImagePlane *PlaneOut,
-                                                M4OSA_UInt32    shiftUV)
-{
-    M4OSA_UInt32 i;
-
-    /* Do three loops, one for each plane type, in order to avoid having too many buffers
-    "hot" at the same time (better for cache). */
-    for (i=0; i<3; i++)
-    {
-        M4OSA_UInt32    topPartHeight, bottomPartHeight, width, sourceStride, destStride, y;
-        M4OSA_MemAddr8    sourceWalk, destWalk;
-
-        /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
-         recomputed from memory. */
-        if (0 == i) /* Y plane */
-        {
-            bottomPartHeight = 2*shiftUV;
-        }
-        else /* U and V planes */
-        {
-            bottomPartHeight = shiftUV;
-        }
-        topPartHeight = PlaneOut[i].u_height - bottomPartHeight;
-        width = PlaneOut[i].u_width;
-
-        sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(topPlane[i], 0, bottomPartHeight);
-        sourceStride = topPlane[i].u_stride;
-
-        destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
-        destStride = PlaneOut[i].u_stride;
-
-        /* First the part from the top source clip frame. */
-        for (y=0; y<topPartHeight; y++)
-        {
-            memcpy((void *)destWalk, (void *)sourceWalk, width);
-            destWalk += destStride;
-            sourceWalk += sourceStride;
-        }
-
-        /* and now change the vars to copy the part from the bottom source clip frame. */
-        sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(bottomPlane[i], 0, 0);
-        sourceStride = bottomPlane[i].u_stride;
-
-        /* destWalk is already at M4XXX_SampleAddress(PlaneOut[i], 0, topPartHeight) */
-
-        for (y=0; y<bottomPartHeight; y++)
-        {
-            memcpy((void *)destWalk, (void *)sourceWalk, width);
-            destWalk += destStride;
-            sourceWalk += sourceStride;
-        }
-    }
-    return M4NO_ERROR;
-}
-
-static M4OSA_ERR M4xVSS_HorizontalSlideTransition(M4VIFI_ImagePlane* leftPlane,
-                                                  M4VIFI_ImagePlane* rightPlane,
-                                                  M4VIFI_ImagePlane *PlaneOut,
-                                                  M4OSA_UInt32    shiftUV)
-{
-    M4OSA_UInt32 i, y;
-    /* If we shifted by exactly 0, or by the width of the target image, then we would get the left
-    frame or the right frame, respectively. These cases aren't handled too well by the general
-    handling, since they result in 0-size memcopies, so might as well particularize them. */
-
-    if (0 == shiftUV)    /* output left frame */
-    {
-        for (i = 0; i<3; i++) /* for each YUV plane */
-        {
-            M4XXX_CopyPlane(&(PlaneOut[i]), &(leftPlane[i]));
-        }
-
-        return M4NO_ERROR;
-    }
-
-    if (PlaneOut[1].u_width == shiftUV) /* output right frame */
-    {
-        for (i = 0; i<3; i++) /* for each YUV plane */
-        {
-            M4XXX_CopyPlane(&(PlaneOut[i]), &(rightPlane[i]));
-        }
-
-        return M4NO_ERROR;
-    }
-
-
-    /* Do three loops, one for each plane type, in order to avoid having too many buffers
-    "hot" at the same time (better for cache). */
-    for (i=0; i<3; i++)
-    {
-        M4OSA_UInt32    height, leftPartWidth, rightPartWidth;
-        M4OSA_UInt32    leftStride,    rightStride,    destStride;
-        M4OSA_MemAddr8    leftWalk,    rightWalk,    destWalkLeft, destWalkRight;
-
-        /* cache the vars used in the loop so as to avoid them being repeatedly fetched
-        and recomputed from memory. */
-        height = PlaneOut[i].u_height;
-
-        if (0 == i) /* Y plane */
-        {
-            rightPartWidth = 2*shiftUV;
-        }
-        else /* U and V planes */
-        {
-            rightPartWidth = shiftUV;
-        }
-        leftPartWidth = PlaneOut[i].u_width - rightPartWidth;
-
-        leftWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(leftPlane[i], rightPartWidth, 0);
-        leftStride = leftPlane[i].u_stride;
-
-        rightWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(rightPlane[i], 0, 0);
-        rightStride = rightPlane[i].u_stride;
-
-        destWalkLeft = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
-        destWalkRight = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], leftPartWidth, 0);
-        destStride = PlaneOut[i].u_stride;
-
-        for (y=0; y<height; y++)
-        {
-            memcpy((void *)destWalkLeft, (void *)leftWalk, leftPartWidth);
-            leftWalk += leftStride;
-
-            memcpy((void *)destWalkRight, (void *)rightWalk, rightPartWidth);
-            rightWalk += rightStride;
-
-            destWalkLeft += destStride;
-            destWalkRight += destStride;
-        }
-    }
-
-    return M4NO_ERROR;
-}
-
-
-M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                                  M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
-                                  M4VSS3GPP_ExternalProgress *pProgress,
-                                  M4OSA_UInt32 uiTransitionKind)
-{
-    M4xVSS_internal_SlideTransitionSettings* settings =
-         (M4xVSS_internal_SlideTransitionSettings*)userData;
-    M4OSA_UInt32    shiftUV;
-
-    M4OSA_TRACE1_0("inside M4xVSS_SlideTransition");
-    if ((M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
-        || (M4xVSS_SlideTransition_LeftOutRightIn == settings->direction) )
-    {
-        /* horizontal slide */
-        shiftUV = ((PlaneOut[1]).u_width * pProgress->uiProgress)/1000;
-        M4OSA_TRACE1_2("M4xVSS_SlideTransition upper: shiftUV = %d,progress = %d",
-            shiftUV,pProgress->uiProgress );
-        if (M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
-        {
-            /* Put the previous clip frame right, the next clip frame left, and reverse shiftUV
-            (since it's a shift from the left frame) so that we start out on the right
-            (i.e. not left) frame, it
-            being from the previous clip. */
-            return M4xVSS_HorizontalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
-                 (PlaneOut[1]).u_width - shiftUV);
-        }
-        else /* Left out, right in*/
-        {
-            return M4xVSS_HorizontalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
-        }
-    }
-    else
-    {
-        /* vertical slide */
-        shiftUV = ((PlaneOut[1]).u_height * pProgress->uiProgress)/1000;
-        M4OSA_TRACE1_2("M4xVSS_SlideTransition bottom: shiftUV = %d,progress = %d",shiftUV,
-            pProgress->uiProgress );
-        if (M4xVSS_SlideTransition_TopOutBottomIn == settings->direction)
-        {
-            /* Put the previous clip frame top, the next clip frame bottom. */
-            return M4xVSS_VerticalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
-        }
-        else /* Bottom out, top in */
-        {
-            return M4xVSS_VerticalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
-                (PlaneOut[1]).u_height - shiftUV);
-        }
-    }
-
-    /* Note: it might be worthwhile to do some parameter checking, see if dimensions match, etc.,
-    at least in debug mode. */
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4xVSS_FadeBlackTransition(M4OSA_Void *pFunctionContext,
- *                                                    M4VIFI_ImagePlane *PlaneIn,
- *                                                    M4VIFI_ImagePlane *PlaneOut,
- *                                                    M4VSS3GPP_ExternalProgress *pProgress,
- *                                                    M4OSA_UInt32 uiEffectKind)
- *
- * @brief    This function apply a fade to black and then a fade from black
- * @note
- * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
- * @param    PlaneIn            (IN) Input YUV420 planar
- * @param    PlaneOut        (IN/OUT) Output YUV420 planar
- * @param    pProgress        (IN/OUT) Progress indication (0-100)
- * @param    uiEffectKind    (IN) Unused
- *
- * @return    M4VIFI_OK:    No error
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
-                                     M4VIFI_ImagePlane PlaneIn2[3],
-                                     M4VIFI_ImagePlane *PlaneOut,
-                                     M4VSS3GPP_ExternalProgress *pProgress,
-                                     M4OSA_UInt32 uiTransitionKind)
-{
-    M4OSA_Int32 tmp = 0;
-    M4OSA_ERR err = M4NO_ERROR;
-
-
-    if((pProgress->uiProgress) < 500)
-    {
-        /**
-         * Compute where we are in the effect (scale is 0->1024) */
-        tmp = (M4OSA_Int32)((1.0 - ((M4OSA_Float)(pProgress->uiProgress*2)/1000)) * 1024 );
-
-        /**
-         * Apply the darkening effect */
-        err = M4VFL_modifyLumaWithScale( (M4ViComImagePlane*)PlaneIn1,
-             (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition: M4VFL_modifyLumaWithScale returns\
-                 error 0x%x, returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
-            return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
-        }
-    }
-    else
-    {
-        /**
-         * Compute where we are in the effect (scale is 0->1024). */
-        tmp = (M4OSA_Int32)( (((M4OSA_Float)(((pProgress->uiProgress-500)*2))/1000)) * 1024 );
-
-        /**
-         * Apply the darkening effect */
-        err = M4VFL_modifyLumaWithScale((M4ViComImagePlane*)PlaneIn2,
-             (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
-        if (M4NO_ERROR != err)
-        {
-            M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition:\
-                 M4VFL_modifyLumaWithScale returns error 0x%x,\
-                     returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
-            return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
-        }
-    }
-
-
-    return M4VIFI_OK;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext,
- *                                                        M4OSA_Void* pBufferIn,
- *                                                        M4OSA_Void* pBufferOut,
- *                                                        M4OSA_UInt32* convertedSize)
- *
- * @brief    This function convert from the customer format to UTF8
- * @note
- * @param    pContext        (IN)    The integrator own context
- * @param    pBufferIn        (IN)    Buffer to convert
- * @param    pBufferOut        (OUT)    Converted buffer
- * @param    convertedSize    (OUT)    Size of the converted buffer
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
-                                       M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-
-    pBufferOut = pBufferIn;
-    if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
-        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
-    {
-        M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
-
-        memset((void *)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,0
-            ,(M4OSA_UInt32)xVSS_context->UTFConversionContext.m_TempOutConversionSize);
-
-        err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
-            (M4OSA_UInt8*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
-                 (M4OSA_UInt32*)&ConvertedSize);
-        if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
-        {
-            M4OSA_TRACE2_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
-
-            /*free too small buffer*/
-            free(xVSS_context->\
-                UTFConversionContext.pTempOutConversionBuffer);
-
-            /*re-allocate the buffer*/
-            xVSS_context->UTFConversionContext.pTempOutConversionBuffer    =
-                 (M4OSA_Void*)M4OSA_32bitAlignedMalloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
-                     (M4OSA_Char *)"M4xVSS_internalConvertToUTF8: UTF conversion buffer");
-            if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertToUTF8");
-                return M4ERR_ALLOC;
-            }
-            xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
-
-            memset((void *)xVSS_context->\
-                UTFConversionContext.pTempOutConversionBuffer,0,(M4OSA_UInt32)xVSS_context->\
-                    UTFConversionContext.m_TempOutConversionSize);
-
-            err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
-                (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
-                    (M4OSA_UInt32*)&ConvertedSize);
-            if(err != M4NO_ERROR)
-            {
-                M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
-                return err;
-            }
-        }
-        else if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
-            return err;
-        }
-        /*decoded path*/
-        pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-        (*convertedSize) = ConvertedSize;
-    }
-    return M4NO_ERROR;
-}
-
-
-/**
- ******************************************************************************
- * prototype    M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext)
- *
- * @brief    This function convert from UTF8 to the customer format
- * @note
- * @param    pContext    (IN) The integrator own context
- * @param    pBufferIn        (IN)    Buffer to convert
- * @param    pBufferOut        (OUT)    Converted buffer
- * @param    convertedSize    (OUT)    Size of the converted buffer
- *
- * @return    M4NO_ERROR:    No error
- * @return    M4ERR_PARAMETER: At least one of the function parameters is null
- ******************************************************************************
- */
-M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
-                                        M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
-{
-    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
-    M4OSA_ERR err;
-
-    pBufferOut = pBufferIn;
-    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
-        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
-    {
-        M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
-
-        memset((void *)xVSS_context->\
-            UTFConversionContext.pTempOutConversionBuffer,0,(M4OSA_UInt32)xVSS_context->\
-                UTFConversionContext.m_TempOutConversionSize);
-
-        err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct\
-            ((M4OSA_Void*)pBufferIn,(M4OSA_UInt8*)xVSS_context->\
-                UTFConversionContext.pTempOutConversionBuffer, (M4OSA_UInt32*)&ConvertedSize);
-        if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
-        {
-            M4OSA_TRACE2_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
-
-            /*free too small buffer*/
-            free(xVSS_context->\
-                UTFConversionContext.pTempOutConversionBuffer);
-
-            /*re-allocate the buffer*/
-            xVSS_context->UTFConversionContext.pTempOutConversionBuffer    =
-                (M4OSA_Void*)M4OSA_32bitAlignedMalloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
-                     (M4OSA_Char *)"M4xVSS_internalConvertFromUTF8: UTF conversion buffer");
-            if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
-            {
-                M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertFromUTF8");
-                return M4ERR_ALLOC;
-            }
-            xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
-
-            memset((void *)xVSS_context->\
-                UTFConversionContext.pTempOutConversionBuffer,0,(M4OSA_UInt32)xVSS_context->\
-                    UTFConversionContext.m_TempOutConversionSize);
-
-            err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct((M4OSA_Void*)pBufferIn,
-                (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
-                     (M4OSA_UInt32*)&ConvertedSize);
-            if(err != M4NO_ERROR)
-            {
-                M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
-                return err;
-            }
-        }
-        else if(err != M4NO_ERROR)
-        {
-            M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
-            return err;
-        }
-        /*decoded path*/
-        pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
-        (*convertedSize) = ConvertedSize;
-    }
-
-
-    return M4NO_ERROR;
-}
diff --git a/libvideoeditor/vss/src/MODULE_LICENSE_APACHE2 b/libvideoeditor/vss/src/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/vss/src/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/vss/src/NOTICE b/libvideoeditor/vss/src/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/vss/src/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/libvideoeditor/vss/src/VideoEditorResampler.cpp b/libvideoeditor/vss/src/VideoEditorResampler.cpp
deleted file mode 100755
index 53537f0..0000000
--- a/libvideoeditor/vss/src/VideoEditorResampler.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_NDEBUG 1
-#include <audio_utils/primitives.h>
-#include <utils/Log.h>
-#include "AudioResampler.h"
-#include "VideoEditorResampler.h"
-
-namespace android {
-
-struct VideoEditorResampler : public AudioBufferProvider {
-
-    public:
-
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-
-    enum { //Sampling freq
-     kFreq8000Hz = 8000,
-     kFreq11025Hz = 11025,
-     kFreq12000Hz = 12000,
-     kFreq16000Hz = 16000,
-     kFreq22050Hz = 22050,
-     kFreq24000Hz = 24000,
-     kFreq32000Hz = 32000,
-     kFreq44100 = 44100,
-     kFreq48000 = 48000,
-    };
-
-    AudioResampler *mResampler;
-    int16_t* mInput;
-    int nbChannels;
-    int nbSamples;
-    M4OSA_Int32 outSamplingRate;
-    M4OSA_Int32 inSamplingRate;
-
-    int16_t *mTmpInBuffer;
-};
-
-#define MAX_SAMPLEDURATION_FOR_CONVERTION 40 //ms
-
-status_t VideoEditorResampler::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) {
-
-    uint32_t dataSize = pBuffer->frameCount * this->nbChannels * sizeof(int16_t);
-    mTmpInBuffer = (int16_t*)malloc(dataSize);
-    memcpy(mTmpInBuffer, this->mInput, dataSize);
-    pBuffer->raw = (void*)mTmpInBuffer;
-
-    return OK;
-}
-
-void VideoEditorResampler::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
-
-    if(pBuffer->raw != NULL) {
-        free(pBuffer->raw);
-        pBuffer->raw = NULL;
-        mTmpInBuffer = NULL;
-    }
-    pBuffer->frameCount = 0;
-}
-
-extern "C" {
-
-M4OSA_Context  LVAudioResamplerCreate(M4OSA_Int32 bitDepth, M4OSA_Int32 inChannelCount,
-                                     M4OSA_Int32 sampleRate, M4OSA_Int32 quality) {
-
-    VideoEditorResampler *context = new VideoEditorResampler();
-    context->mResampler = AudioResampler::create(
-        bitDepth, inChannelCount, sampleRate);
-    if (context->mResampler == NULL) {
-        return NULL;
-    }
-    context->mResampler->setSampleRate(android::VideoEditorResampler::kFreq32000Hz);
-    context->mResampler->setVolume(0x1000, 0x1000);
-    context->nbChannels = inChannelCount;
-    context->outSamplingRate = sampleRate;
-    context->mInput = NULL;
-    context->mTmpInBuffer = NULL;
-
-    return ((M4OSA_Context )context);
-}
-
-
-void LVAudiosetSampleRate(M4OSA_Context resamplerContext, M4OSA_Int32 inSampleRate) {
-
-    VideoEditorResampler *context =
-      (VideoEditorResampler *)resamplerContext;
-    context->mResampler->setSampleRate(inSampleRate);
-    /*
-     * nbSamples is calculated for 40ms worth of data;hence sample rate
-     * is used to calculate the nbSamples
-     */
-    context->inSamplingRate = inSampleRate;
-    // Allocate buffer for maximum allowed number of samples.
-    context->mInput = (int16_t*)malloc( (inSampleRate * MAX_SAMPLEDURATION_FOR_CONVERTION *
-                                   context->nbChannels * sizeof(int16_t)) / 1000);
-}
-
-void LVAudiosetVolume(M4OSA_Context resamplerContext, M4OSA_Int16 left, M4OSA_Int16 right) {
-
-    VideoEditorResampler *context =
-       (VideoEditorResampler *)resamplerContext;
-    context->mResampler->setVolume(left,right);
-}
-
-void LVDestroy(M4OSA_Context resamplerContext) {
-
-    VideoEditorResampler *context =
-       (VideoEditorResampler *)resamplerContext;
-
-    if (context->mTmpInBuffer != NULL) {
-        free(context->mTmpInBuffer);
-        context->mTmpInBuffer = NULL;
-    }
-
-    if (context->mInput != NULL) {
-        free(context->mInput);
-        context->mInput = NULL;
-    }
-
-    if (context->mResampler != NULL) {
-        delete context->mResampler;
-        context->mResampler = NULL;
-    }
-
-    if (context != NULL) {
-        delete context;
-        context = NULL;
-    }
-}
-
-void LVAudioresample_LowQuality(M4OSA_Int16* out, M4OSA_Int16* input,
-                                     M4OSA_Int32 outFrameCount, M4OSA_Context resamplerContext) {
-
-    VideoEditorResampler *context =
-      (VideoEditorResampler *)resamplerContext;
-    int32_t *pTmpBuffer = NULL;
-
-    context->nbSamples = (context->inSamplingRate * outFrameCount) / context->outSamplingRate;
-    memcpy(context->mInput,input,(context->nbSamples * context->nbChannels * sizeof(int16_t)));
-
-    /*
-     SRC module always gives stereo output, hence 2 for stereo audio
-    */
-    pTmpBuffer = (int32_t*)malloc(outFrameCount * 2 * sizeof(int32_t));
-    memset(pTmpBuffer, 0x00, outFrameCount * 2 * sizeof(int32_t));
-
-    context->mResampler->resample((int32_t *)pTmpBuffer,
-       (size_t)outFrameCount, (VideoEditorResampler *)resamplerContext);
-    // Convert back to 16 bits
-    ditherAndClamp((int32_t*)out, pTmpBuffer, outFrameCount);
-    free(pTmpBuffer);
-    pTmpBuffer = NULL;
-}
-
-}
-
-} //namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/Android.mk b/libvideoeditor/vss/stagefrightshells/Android.mk
deleted file mode 100755
index 5053e7d..0000000
--- a/libvideoeditor/vss/stagefrightshells/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h
deleted file mode 100755
index 7a9a012..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditor3gpReader.cpp
-* @brief  StageFright shell 3GP Reader
-*************************************************************************
-*/
-#ifndef VIDEOEDITOR_3GPREADER_H
-#define VIDEOEDITOR_3GPREADER_H
-
-#include "M4READER_Common.h"
-
-M4OSA_ERR VideoEditor3gpReader_getInterface(
-        M4READER_MediaType *pMediaType,
-        M4READER_GlobalInterface **pRdrGlobalInterface,
-        M4READER_DataInterface **pRdrDataInterface);
-
-#endif /* VIDEOEDITOR_3GPREADER_H */
-
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h
deleted file mode 100755
index 0d3b801..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorAudioDecoder.cpp
-* @brief  StageFright shell Audio Decoder
-*************************************************************************
-*/
-#ifndef VIDEOEDITOR_AUDIODECODER_H
-#define VIDEOEDITOR_AUDIODECODER_H
-
-#include "M4AD_Common.h"
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_AAC(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface);
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRNB(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface);
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRWB(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface);
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_MP3(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface);
-
-#endif /* VIDEOEDITOR_AUDIODECODER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h
deleted file mode 100755
index f4f6b04..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorAudioEncoder.cpp
-* @brief  StageFright shell Audio Encoder
-*************************************************************************
-*/
-#ifndef VIDEOEDITOR_AUDIOENCODER_H
-#define VIDEOEDITOR_AUDIOENCODER_H
-
-#include "M4OSA_CoreID.h"
-#include "M4OSA_Memory.h"
-#include "M4ENCODER_AudioCommon.h"
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface_AAC(
-        M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface);
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface_AMRNB(
-        M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface);
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface_MP3(
-        M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface);
-
-#endif /* VIDEOEDITOR_AUDIOENCODER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h
deleted file mode 100755
index 3aff6a7..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorBuffer.c
-* @brief  StageFright shell Buffer
-*************************************************************************
-*/
-#ifndef   VIDEOEDITOR_BUFFER_H
-#define   VIDEOEDITOR_BUFFER_H
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_Memory.h"
-#include "M4OSA_CharStar.h"
-#include "M4_Utils.h"
-
-#include "LV_Macros.h"
-
-/*--- Core id for VIDEOEDITOR Buffer allocations  ---*/
-#define VIDEOEDITOR_BUFFER_EXTERNAL 0x012F
-
-/* ----- errors  -----*/
-#define M4ERR_NO_BUFFER_AVAILABLE \
-    M4OSA_ERR_CREATE(M4_ERR,VIDEOEDITOR_BUFFER_EXTERNAL,0x000001)
-#define M4ERR_NO_BUFFER_MATCH \
-    M4OSA_ERR_CREATE(M4_ERR,VIDEOEDITOR_BUFFER_EXTERNAL,0x000002)
-
-typedef enum {
-    VIDEOEDITOR_BUFFER_kEmpty = 0,
-    VIDEOEDITOR_BUFFER_kFilled,
-} VIDEOEDITOR_BUFFER_State;
-
-/**
- ************************************************************************
- * Structure    LVOMX_BUFFER_Buffer
- * @brief       One OMX Buffer and data related to it
- ************************************************************************
-*/
-typedef struct {
-    M4OSA_Void* pData;              /**< Pointer to the data*/
-    M4OSA_UInt32 size;
-    VIDEOEDITOR_BUFFER_State state; /**< Buffer state */
-    M4OSA_UInt32 idx;               /**< Index of the buffer inside the pool */
-    M4_MediaTime    buffCTS;        /**< Time stamp of the buffer */
-} VIDEOEDITOR_BUFFER_Buffer;
-
-/**
- ************************************************************************
- * Structure    LVOMX_BUFFER_Pool
- * @brief       Structure to manage buffers
- ************************************************************************
-*/
-typedef struct {
-    VIDEOEDITOR_BUFFER_Buffer* pNXPBuffer;
-    M4OSA_UInt32 NB;
-    M4OSA_Char* poolName;
-} VIDEOEDITOR_BUFFER_Pool;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif //__cplusplus
-
-/**
- ************************************************************************
- M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
- *         M4OSA_UInt32 nbBuffers)
- * @brief   Allocate a pool of nbBuffers buffers
- *
- * @param   ppool      : IN The buffer pool to create
- * @param   nbBuffers  : IN The number of buffers in the pool
- * @param   poolName   : IN a name given to the pool
- * @return  Error code
- ************************************************************************
-*/
-M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
-        M4OSA_UInt32 nbBuffers, M4OSA_Char* poolName);
-
-/**
- ************************************************************************
- M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(LVOMX_BUFFER_Pool* ppool)
- * @brief   Deallocate a buffer pool
- *
- * @param   ppool      : IN The buffer pool to free
- * @return  Error code
- ************************************************************************
-*/
-M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool);
-
-/**
- ************************************************************************
- M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
- *         VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
- * @brief   Returns a buffer in a given state
- *
- * @param   ppool      : IN The buffer pool
- * @param   desiredState : IN The buffer state
- * @param   pNXPBuffer : IN The selected buffer
- * @return  Error code
- ************************************************************************
-*/
-M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
-        VIDEOEDITOR_BUFFER_State desiredState,
-        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer);
-
-
-M4OSA_ERR VIDEOEDITOR_BUFFER_initPoolBuffers(VIDEOEDITOR_BUFFER_Pool* ppool,
-        M4OSA_UInt32 lSize);
-
-M4OSA_ERR VIDEOEDITOR_BUFFER_getOldestBuffer(VIDEOEDITOR_BUFFER_Pool *pool,
-        VIDEOEDITOR_BUFFER_State desiredState,
-        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer);
-
-#ifdef __cplusplus
-}
-#endif //__cplusplus
-#endif /*VIDEOEDITOR_BUFFER_H*/
-
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h
deleted file mode 100755
index 4c3b517..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __VIDEO_EDITOR_API_H__
-#define __VIDEO_EDITOR_API_H__
-
-#include "M4OSA_Types.h"
-
-typedef enum
-{
-    MSG_TYPE_PROGRESS_INDICATION,     // Playback progress indication event
-    MSG_TYPE_PLAYER_ERROR,            // Playback error
-    MSG_TYPE_PREVIEW_END,             // Preview of clips is complete
-    MSG_TYPE_OVERLAY_UPDATE,          // update overlay during preview
-    MSG_TYPE_OVERLAY_CLEAR,           // clear the overlay
-} progress_callback_msg_type;
-
-typedef struct {
-    int overlaySettingsIndex;
-    int clipIndex;
-} VideoEditorCurretEditInfo;
-
-typedef struct
-{
-    M4OSA_Void     *pFile;                   /** PCM file path */
-    M4OSA_Bool     bRemoveOriginal;          /** If true, the original audio track
-                                                 is not taken into account */
-    M4OSA_UInt32   uiNbChannels;            /** Number of channels (1=mono, 2=stereo) of BGM clip*/
-    M4OSA_UInt32   uiSamplingFrequency;     /** Sampling audio frequency (8000 for amr, 16000 or
-                                                more for aac) of BGM clip*/
-    M4OSA_UInt32   uiExtendedSamplingFrequency; /** Extended frequency for AAC+,
-                                                eAAC+ streams of BGM clip*/
-    M4OSA_UInt32   uiAddCts;                /** Time, in milliseconds, at which the added
-                                                audio track is inserted */
-    M4OSA_UInt32   uiAddVolume;             /** Volume, in percentage, of the added audio track */
-    M4OSA_UInt32   beginCutMs;
-    M4OSA_UInt32   endCutMs;
-    M4OSA_Int32    fileType;
-    M4OSA_Bool     bLoop;                   /** Looping on/off **/
-    /* Audio ducking */
-    M4OSA_UInt32   uiInDucking_threshold;   /** Threshold value at which
-                                                background music shall duck */
-    M4OSA_UInt32   uiInDucking_lowVolume;   /** lower the background track to
-                                                this factor of current level */
-    M4OSA_Bool     bInDucking_enable;       /** enable ducking */
-    M4OSA_UInt32   uiBTChannelCount;        /** channel count for BT */
-    M4OSA_Void     *pPCMFilePath;
-} M4xVSS_AudioMixingSettings;
-
-typedef struct
-{
-    M4OSA_Void      *pBuffer;            /* YUV420 buffer of frame to be rendered*/
-    M4OSA_UInt32    timeMs;            /* time stamp of the frame to be rendered*/
-    M4OSA_UInt32    uiSurfaceWidth;    /* Surface display width*/
-    M4OSA_UInt32    uiSurfaceHeight;    /* Surface display height*/
-    M4OSA_UInt32    uiFrameWidth;        /* Frame width*/
-    M4OSA_UInt32    uiFrameHeight;        /* Frame height*/
-    M4OSA_Bool      bApplyEffect;        /* Apply video effects before render*/
-    M4OSA_UInt32    clipBeginCutTime;  /* Clip begin cut time relative to storyboard */
-    M4OSA_UInt32    clipEndCutTime;    /* Clip end cut time relative to storyboard */
-    M4OSA_UInt32    videoRotationDegree; /* Video rotation degree */
-
-} VideoEditor_renderPreviewFrameStr;
-#endif /*__VIDEO_EDITOR_API_H__*/
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h
deleted file mode 100755
index 5b3be40..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorMp3Reader.cpp
-* @brief  StageFright shell MP3 Reader
-*************************************************************************
-*/
-#ifndef VIDEOEDITOR_MP3READER_H
-#define VIDEOEDITOR_MP3READER_H
-
-#include "M4READER_Common.h"
-
-M4OSA_ERR VideoEditorMp3Reader_getInterface(
-        M4READER_MediaType *pMediaType,
-        M4READER_GlobalInterface **pRdrGlobalInterface,
-        M4READER_DataInterface **pRdrDataInterface);
-
-#endif /* VIDEOEDITOR_MP3READER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h
deleted file mode 100755
index a21b21d..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorUtils.cpp
-* @brief  StageFright shell Utilities
-*************************************************************************
-*/
-#ifndef ANDROID_UTILS_H_
-#define ANDROID_UTILS_H_
-
-/*******************
- *     HEADERS     *
- *******************/
-
-#include "M4OSA_Debug.h"
-
-#include "utils/Log.h"
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-/**
- *************************************************************************
- * VIDEOEDITOR_CHECK(test, errCode)
- * @note This macro displays an error message and goes to function cleanUp label
- *       if the test fails.
- *************************************************************************
- */
-#define VIDEOEDITOR_CHECK(test, errCode) \
-{ \
-    if( !(test) ) { \
-        ALOGV("!!! %s (L%d) check failed : " #test ", yields error 0x%.8x", \
-            __FILE__, __LINE__, errCode); \
-        err = (errCode); \
-        goto cleanUp; \
-    } \
-}
-
-/**
- *************************************************************************
- * SAFE_FREE(p)
- * @note This macro calls free and makes sure the pointer is set to NULL.
- *************************************************************************
- */
-#define SAFE_FREE(p) \
-{ \
-    if(M4OSA_NULL != (p)) { \
-        free((p)) ; \
-        (p) = M4OSA_NULL ; \
-    } \
-}
-
-/**
- *************************************************************************
- * SAFE_MALLOC(p, type, count, comment)
- * @note This macro allocates a buffer, checks for success and fills the buffer
- *       with 0.
- *************************************************************************
- */
-#define SAFE_MALLOC(p, type, count, comment) \
-{ \
-    (p) = (type*)M4OSA_32bitAlignedMalloc(sizeof(type)*(count), 0xFF,(M4OSA_Char*)comment);\
-    VIDEOEDITOR_CHECK(M4OSA_NULL != (p), M4ERR_ALLOC); \
-    memset((void *)(p), 0,sizeof(type)*(count)); \
-}
-
-
-    /********************
-     *    UTILITIES     *
-     ********************/
-
-
-namespace android {
-
-/*--------------------------*/
-/* DISPLAY METADATA CONTENT */
-/*--------------------------*/
-void displayMetaData(const sp<MetaData> meta);
-
-// Build the AVC codec spcific info from the StageFright encoders output
-status_t buildAVCCodecSpecificData(uint8_t **outputData, size_t *outputSize,
-        const uint8_t *data, size_t size, MetaData *param);
-
-}//namespace android
-
-
-#endif //ANDROID_UTILS_H_
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h
deleted file mode 100755
index 1eea3a6..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorVideoDecoder.cpp
-* @brief  StageFright shell video decoder
-*************************************************************************
-*/
-#ifndef VIDEOEDITOR_VIDEODECODER_H
-#define VIDEOEDITOR_VIDEODECODER_H
-
-#include "M4DECODER_Common.h"
-
-M4OSA_ERR VideoEditorVideoDecoder_getInterface_MPEG4(
-        M4DECODER_VideoType *pDecoderType,
-        M4OSA_Context *pDecoderInterface);
-
-M4OSA_ERR VideoEditorVideoDecoder_getInterface_H264(
-        M4DECODER_VideoType *pDecoderType,
-        M4OSA_Context *pDecoderInterface);
-
-M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_MPEG4(
-        M4DECODER_VideoType *pDecoderType,
-        M4OSA_Context *pDecInterface);
-
-M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_H264(
-        M4DECODER_VideoType *pDecoderType,
-        M4OSA_Context *pDecInterface);
-
-M4OSA_ERR VideoEditorVideoDecoder_getVideoDecodersAndCapabilities(
-    M4DECODER_VideoDecoders** decoders);
-
-#endif // VIDEOEDITOR_VIDEODECODER_H
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
deleted file mode 100755
index 6762643..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
-*************************************************************************
-* @file   VideoEditorVideoDecoder_Internal.h
-* @brief  StageFright shell video decoder internal header file*
-*************************************************************************
-*/
-
-#include "M4OSA_Types.h"
-#include "M4OSA_Debug.h"
-#include "M4OSA_Memory.h"
-#include "M4_Common.h"
-#include "M4OSA_CoreID.h"
-
-#include "M4DA_Types.h"
-#include "M4READER_Common.h"
-#include "M4VIFI_FiltersAPI.h"
-#include "M4TOOL_VersionInfo.h"
-#include "M4DECODER_Common.h"
-#include "M4OSA_Semaphore.h"
-#include "VideoEditorBuffer.h"
-#include "M4VD_Tools.h"
-#include "I420ColorConverter.h"
-
-#include <utils/RefBase.h>
-#include <android/rect.h>
-#include <OMX_Video.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-
-#define VIDEOEDITOR_VIDEC_SHELL_VER_MAJOR     0
-#define VIDEOEDITOR_VIDEC_SHELL_VER_MINOR     0
-#define VIDEOEDITOR_VIDEC_SHELL_VER_REVISION  1
-
-/* ERRORS */
-#define M4ERR_SF_DECODER_RSRC_FAIL M4OSA_ERR_CREATE(M4_ERR, 0xFF, 0x0001)
-
-namespace android {
-
-typedef enum {
-    VIDEOEDITOR_kMpeg4VideoDec,
-    VIDEOEDITOR_kH263VideoDec,
-    VIDEOEDITOR_kH264VideoDec
-} VIDEOEDITOR_CodecType;
-
-
-/*typedef struct{
-    M4OSA_UInt32 stream_byte;
-    M4OSA_UInt32 stream_index;
-    M4OSA_MemAddr8 in;
-
-} VIDEOEDITOR_VIDEO_Bitstream_ctxt;*/
-
-typedef M4VS_Bitstream_ctxt VIDEOEDITOR_VIDEO_Bitstream_ctxt;
-
-typedef struct {
-
-    /** Stagefrigth params */
-    OMXClient               mClient; /**< OMX Client session instance. */
-    sp<MediaSource>         mVideoDecoder; /**< Stagefright decoder instance */
-    sp<MediaSource>         mReaderSource; /**< Reader access > */
-
-    /* READER */
-    M4READER_GlobalInterface *m_pReaderGlobal;
-    M4READER_DataInterface  *m_pReader;
-    M4_AccessUnit           *m_pNextAccessUnitToDecode;
-
-    /* STREAM PARAMS */
-    M4_VideoStreamHandler*  m_pVideoStreamhandler;
-
-    /* User filter params. */
-    M4VIFI_PlanConverterFunctionType *m_pFilter;
-    M4OSA_Void              *m_pFilterUserData;
-
-    M4_MediaTime            m_lastDecodedCTS;
-    M4_MediaTime            m_lastRenderCts;
-    M4OSA_Bool              mReachedEOS;
-    VIDEOEDITOR_CodecType   mDecoderType;
-    M4DECODER_VideoSize     m_VideoSize;
-    M4DECODER_MPEG4_DecoderConfigInfo m_Dci; /**< Decoder Config info */
-    VIDEOEDITOR_BUFFER_Pool *m_pDecBufferPool; /**< Decoded buffer pool */
-    OMX_COLOR_FORMATTYPE    decOuputColorFormat;
-
-    M4OSA_UInt32            mNbInputFrames;
-    M4OSA_Double            mFirstInputCts;
-    M4OSA_Double            mLastInputCts;
-    M4OSA_UInt32            mNbRenderedFrames;
-    M4OSA_Double            mFirstRenderedCts;
-    M4OSA_Double            mLastRenderedCts;
-    M4OSA_UInt32            mNbOutputFrames;
-    M4OSA_Double            mFirstOutputCts;
-    M4OSA_Double            mLastOutputCts;
-    M4OSA_Int32             mGivenWidth, mGivenHeight; //Used in case of
-                                                       //INFO_FORMAT_CHANGED
-    ARect                   mCropRect;  // These are obtained from kKeyCropRect.
-    I420ColorConverter*     mI420ColorConverter;
-
-    // Time interval between two consequtive/neighboring video frames.
-    M4_MediaTime            mFrameIntervalMs;
-
-} VideoEditorVideoDecoder_Context;
-
-} //namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h
deleted file mode 100755
index fd5154f..0000000
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorVideoEncoder.cpp
-* @brief  StageFright shell video encoder
-*************************************************************************
-*/
-#ifndef VIDEOEDITOR_VIDEOENCODER_H
-#define VIDEOEDITOR_VIDEOENCODER_H
-
-#include "M4ENCODER_common.h"
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface_H263(M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface_MPEG4(M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface_H264(M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
-
-#endif //VIDEOEDITOR_VIDEOENCODER_H
diff --git a/libvideoeditor/vss/stagefrightshells/src/Android.mk b/libvideoeditor/vss/stagefrightshells/src/Android.mk
deleted file mode 100755
index e30b85d..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/Android.mk
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
-    MediaBufferPuller.cpp \
-    VideoEditorVideoDecoder.cpp \
-    VideoEditorAudioDecoder.cpp \
-    VideoEditorMp3Reader.cpp \
-    VideoEditor3gpReader.cpp \
-    VideoEditorUtils.cpp \
-    VideoEditorBuffer.c \
-    VideoEditorVideoEncoder.cpp \
-    VideoEditorAudioEncoder.cpp
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/frameworks/av/media/libmediaplayerservice \
-    $(TOP)/frameworks/av/media/libstagefright \
-    $(TOP)/frameworks/av/media/libstagefright/include \
-    $(TOP)/frameworks/av/media/libstagefright/rtsp \
-    $(call include-path-for, corecg graphics) \
-    $(TOP)/frameworks/av/libvideoeditor/lvpp \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
-    $(TOP)/frameworks/native/include/media/editor \
-    $(TOP)/frameworks/native/include/media/openmax
-
-LOCAL_SHARED_LIBRARIES :=     \
-    libcutils                 \
-    libutils                  \
-    libmedia                  \
-    libbinder                 \
-    libstagefright            \
-    libstagefright_foundation \
-    libstagefright_omx        \
-    libgui                    \
-    libvideoeditor_osal       \
-    libvideoeditorplayer      \
-
-LOCAL_CFLAGS += \
-
-LOCAL_STATIC_LIBRARIES := \
-    libstagefright_color_conversion
-
-
-LOCAL_MODULE:= libvideoeditor_stagefrightshells
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_STATIC_LIBRARY)
diff --git a/libvideoeditor/vss/stagefrightshells/src/MODULE_LICENSE_APACHE2 b/libvideoeditor/vss/stagefrightshells/src/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp b/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp
deleted file mode 100644
index acc8268..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaBufferPuller"
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include "MediaBufferPuller.h"
-
-namespace android {
-
-
-MediaBufferPuller::MediaBufferPuller(const sp<MediaSource>& source)
-    : mSource(source),
-      mAskToStart(false),
-      mAskToStop(false),
-      mAcquireStopped(false),
-      mReleaseStopped(false),
-      mSourceError(OK) {
-
-    androidCreateThread(acquireThreadStart, this);
-    androidCreateThread(releaseThreadStart, this);
-}
-
-MediaBufferPuller::~MediaBufferPuller() {
-    stop();
-}
-
-bool MediaBufferPuller::hasMediaSourceReturnedError() const {
-    Mutex::Autolock autolock(mLock);
-    return ((mSourceError != OK) ? true : false);
-}
-void MediaBufferPuller::start() {
-    Mutex::Autolock autolock(mLock);
-    mAskToStart = true;
-    mAcquireCond.signal();
-    mReleaseCond.signal();
-}
-
-void MediaBufferPuller::stop() {
-    Mutex::Autolock autolock(mLock);
-    mAskToStop = true;
-    mAcquireCond.signal();
-    mReleaseCond.signal();
-    while (!mAcquireStopped || !mReleaseStopped) {
-        mUserCond.wait(mLock);
-    }
-
-    // Release remaining buffers
-    for (size_t i = 0; i < mBuffers.size(); i++) {
-        mBuffers.itemAt(i)->release();
-    }
-
-    for (size_t i = 0; i < mReleaseBuffers.size(); i++) {
-        mReleaseBuffers.itemAt(i)->release();
-    }
-
-    mBuffers.clear();
-    mReleaseBuffers.clear();
-}
-
-MediaBuffer* MediaBufferPuller::getBufferNonBlocking() {
-    Mutex::Autolock autolock(mLock);
-    if (mBuffers.empty()) {
-        return NULL;
-    } else {
-        MediaBuffer* b = mBuffers.itemAt(0);
-        mBuffers.removeAt(0);
-        return b;
-    }
-}
-
-MediaBuffer* MediaBufferPuller::getBufferBlocking() {
-    Mutex::Autolock autolock(mLock);
-    while (mBuffers.empty() && !mAcquireStopped) {
-        mUserCond.wait(mLock);
-    }
-
-    if (mBuffers.empty()) {
-        return NULL;
-    } else {
-        MediaBuffer* b = mBuffers.itemAt(0);
-        mBuffers.removeAt(0);
-        return b;
-    }
-}
-
-void MediaBufferPuller::putBuffer(MediaBuffer* buffer) {
-    Mutex::Autolock autolock(mLock);
-    mReleaseBuffers.push(buffer);
-    mReleaseCond.signal();
-}
-
-int MediaBufferPuller::acquireThreadStart(void* arg) {
-    MediaBufferPuller* self = (MediaBufferPuller*)arg;
-    self->acquireThreadFunc();
-    return 0;
-}
-
-int MediaBufferPuller::releaseThreadStart(void* arg) {
-    MediaBufferPuller* self = (MediaBufferPuller*)arg;
-    self->releaseThreadFunc();
-    return 0;
-}
-
-void MediaBufferPuller::acquireThreadFunc() {
-    mLock.lock();
-
-    // Wait for the start signal
-    while (!mAskToStart && !mAskToStop) {
-        mAcquireCond.wait(mLock);
-    }
-
-    // Loop until we are asked to stop, or there is nothing more to read
-    while (!mAskToStop) {
-        MediaBuffer* pBuffer;
-        mLock.unlock();
-        status_t result = mSource->read(&pBuffer, NULL);
-        mLock.lock();
-        mSourceError = result;
-        if (result != OK) {
-            break;
-        }
-        mBuffers.push(pBuffer);
-        mUserCond.signal();
-    }
-
-    mAcquireStopped = true;
-    mUserCond.signal();
-    mLock.unlock();
-}
-
-void MediaBufferPuller::releaseThreadFunc() {
-    mLock.lock();
-
-    // Wait for the start signal
-    while (!mAskToStart && !mAskToStop) {
-        mReleaseCond.wait(mLock);
-    }
-
-    // Loop until we are asked to stop
-    while (1) {
-        if (mReleaseBuffers.empty()) {
-            if (mAskToStop) {
-                break;
-            } else {
-                mReleaseCond.wait(mLock);
-                continue;
-            }
-        }
-        MediaBuffer* pBuffer = mReleaseBuffers.itemAt(0);
-        mReleaseBuffers.removeAt(0);
-        mLock.unlock();
-        pBuffer->release();
-        mLock.lock();
-    }
-
-    mReleaseStopped = true;
-    mUserCond.signal();
-    mLock.unlock();
-}
-
-};  // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h b/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h
deleted file mode 100644
index ed72a53..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _MEDIA_BUFFER_PULLER_H
-#define _MEDIA_BUFFER_PULLER_H
-
-#include <utils/threads.h>
-#include <utils/Vector.h>
-
-
-namespace android {
-
-struct MediaSource;
-struct MediaBuffer;
-
-/*
- * An object of this class can pull a list of media buffers
- * from a MediaSource repeatedly. The user can then get the
- * buffers from that list.
- */
-struct MediaBufferPuller {
-public:
-    MediaBufferPuller(const sp<MediaSource>& source);
-    ~MediaBufferPuller();
-
-    // Start to build up the list of the buffers.
-    void start();
-
-    // Release the list of the available buffers, and stop
-    // pulling buffers from the MediaSource.
-    void stop();
-
-    // Get a buffer from the list. If there is no buffer available
-    // at the time this method is called, NULL is returned.
-    MediaBuffer* getBufferBlocking();
-
-    // Get a buffer from the list. If there is no buffer available
-    // at the time this method is called, it blocks waiting for
-    // a buffer to become available or until stop() is called.
-    MediaBuffer* getBufferNonBlocking();
-
-    // Add a buffer to the end of the list available media buffers
-    void putBuffer(MediaBuffer* buffer);
-
-    // Check whether the source returned an error or not.
-    bool hasMediaSourceReturnedError() const;
-
-private:
-    static int acquireThreadStart(void* arg);
-    void acquireThreadFunc();
-
-    static int releaseThreadStart(void* arg);
-    void releaseThreadFunc();
-
-    sp<MediaSource> mSource;
-    Vector<MediaBuffer*> mBuffers;
-    Vector<MediaBuffer*> mReleaseBuffers;
-
-    mutable Mutex mLock;
-    Condition mUserCond;     // for the user of this class
-    Condition mAcquireCond;  // for the acquire thread
-    Condition mReleaseCond;  // for the release thread
-
-    bool mAskToStart;      // Asks the threads to start
-    bool mAskToStop;       // Asks the threads to stop
-    bool mAcquireStopped;  // The acquire thread has stopped
-    bool mReleaseStopped;  // The release thread has stopped
-    status_t mSourceError; // Error returned by MediaSource read
-
-    // Don't call me!
-    MediaBufferPuller(const MediaBufferPuller&);
-    MediaBufferPuller& operator=(const MediaBufferPuller&);
-};
-
-}  // namespace android
-
-#endif  // _MEDIA_BUFFER_PULLER_H
diff --git a/libvideoeditor/vss/stagefrightshells/src/NOTICE b/libvideoeditor/vss/stagefrightshells/src/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp
deleted file mode 100755
index 99cf9ec..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp
+++ /dev/null
@@ -1,2034 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditor3gpReader.cpp
-* @brief  StageFright shell 3GP Reader
-*************************************************************************
-*/
-
-#define LOG_NDEBUG 1
-#define LOG_TAG "VIDEOEDITOR_3GPREADER"
-
-/**
- * HEADERS
- *
- */
-#define VIDEOEDITOR_BITSTREAM_PARSER
-
-#include "M4OSA_Debug.h"
-#include "VideoEditor3gpReader.h"
-#include "M4SYS_AccessUnit.h"
-#include "VideoEditorUtils.h"
-#include "M4READER_3gpCom.h"
-#include "M4_Common.h"
-#include "M4OSA_FileWriter.h"
-
-#ifdef VIDEOEDITOR_BITSTREAM_PARSER
-#include "M4OSA_CoreID.h"
-#include "M4OSA_Error.h"
-#include "M4OSA_Memory.h"
-#include "M4_Utils.h"
-#endif
-
-#include "ESDS.h"
-#include "utils/Log.h"
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-/**
- * SOURCE CLASS
- */
-namespace android {
-/**
- * ENGINE INTERFACE
- */
-
-/**
- ************************************************************************
- * @brief   Array of AMR NB/WB bitrates
- * @note    Array to match the mode and the bit rate
- ************************************************************************
-*/
-const M4OSA_UInt32 VideoEditor3gpReader_AmrBitRate [2 /* 8kHz / 16kHz     */]
-                                                   [9 /* the bitrate mode */] =
-{
-    {4750, 5150, 5900,  6700,  7400,  7950,  10200, 12200, 0},
-    {6600, 8850, 12650, 14250, 15850, 18250, 19850, 23050, 23850}
-};
-
-/**
- *******************************************************************************
- * structure VideoEditor3gpReader_Context
- * @brief:This structure defines the context of the StageFright 3GP shell Reader
- *******************************************************************************
-*/
-typedef struct {
-    sp<DataSource>              mDataSource;
-    sp<MediaExtractor>          mExtractor;
-    sp<MediaSource>             mAudioSource;
-    sp<MediaSource>             mVideoSource;
-    M4_StreamHandler*           mAudioStreamHandler;
-    M4_StreamHandler*           mVideoStreamHandler;
-    M4SYS_AccessUnit            mAudioAu;
-    M4SYS_AccessUnit            mVideoAu;
-    M4OSA_Time                  mMaxDuration;
-    int64_t                     mFileSize;
-    M4_StreamType               mStreamType;
-    M4OSA_UInt32                mStreamId;
-    int32_t                     mTracks;
-    int32_t                     mCurrTrack;
-    M4OSA_Bool                  mAudioSeeking;
-    M4OSA_Time                  mAudioSeekTime;
-    M4OSA_Bool                  mVideoSeeking;
-    M4OSA_Time                  mVideoSeekTime;
-
-} VideoEditor3gpReader_Context;
-
-#ifdef VIDEOEDITOR_BITSTREAM_PARSER
-/**
- ************************************************************************
- * structure    VideoEditor3gpReader_BitStreamParserContext
- * @brief       Internal BitStreamParser context
- ************************************************************************
-*/
-typedef struct {
-    M4OSA_UInt32*   mPbitStream;   /**< bitstream pointer (32bits aligned) */
-    M4OSA_Int32     mSize;         /**< bitstream size in bytes */
-    M4OSA_Int32     mIndex;        /**< byte index */
-    M4OSA_Int32     mBitIndex;     /**< bit index */
-    M4OSA_Int32     mStructSize;   /**< size of structure */
-} VideoEditor3gpReader_BitStreamParserContext;
-
-/**
- *******************************************************************************
- * @brief   Allocates the context and initializes internal data.
- * @param   pContext    (OUT)  Pointer to the BitStreamParser context to create.
- * @param   bitStream   A pointer to the bitstream
- * @param   size        The size of the bitstream in bytes
- *******************************************************************************
-*/
-static void VideoEditor3gpReader_BitStreamParserInit(void** pContext,
-        void* pBitStream, M4OSA_Int32 size) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext;
-
-    *pContext=M4OSA_NULL;
-    pStreamContext = (VideoEditor3gpReader_BitStreamParserContext*)M4OSA_32bitAlignedMalloc(
-        sizeof(VideoEditor3gpReader_BitStreamParserContext), M4READER_3GP,
-            (M4OSA_Char*)"3GP BitStreamParser Context");
-    if (M4OSA_NULL == pStreamContext) {
-        return;
-    }
-    pStreamContext->mPbitStream=(M4OSA_UInt32*)pBitStream;
-    pStreamContext->mSize=size;
-    pStreamContext->mIndex=0;
-    pStreamContext->mBitIndex=0;
-    pStreamContext->mStructSize =
-        sizeof(VideoEditor3gpReader_BitStreamParserContext);
-
-    *pContext=pStreamContext;
-}
-/**
- **********************************************************************
- * @brief   Clean up context
- * @param   pContext    (IN/OUT)  BitStreamParser context.
- **********************************************************************
-*/
-static void VideoEditor3gpReader_BitStreamParserCleanUp(void* pContext) {
-    free((M4OSA_Int32*)pContext);
-}
-/**
- *****************************************************************************
- * @brief   Read the next <length> bits in the bitstream.
- * @note    The function does not update the bitstream pointer.
- * @param   pContext    (IN/OUT) BitStreamParser context.
- * @param   length      (IN) The number of bits to extract from the bitstream
- * @return  the read bits
- *****************************************************************************
-*/
-static M4OSA_UInt32 VideoEditor3gpReader_BitStreamParserShowBits(void* pContext,
-        M4OSA_Int32 length) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
-        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
-
-    M4OSA_UInt32 u_mask;
-    M4OSA_UInt32 retval;
-    M4OSA_Int32 i_ovf;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0,
-        "VideoEditor3gpReader_BitStreamParserShowBits:invalid context pointer");
-
-    retval=(M4OSA_UInt32)GET_MEMORY32(pStreamContext->\
-        mPbitStream[ pStreamContext->mIndex ]);
-    i_ovf = pStreamContext->mBitIndex + length - 32;
-    u_mask = (length >= 32) ? 0xffffffff: (1 << length) - 1;
-
-    /* do we have enough bits availble in the current word(32bits)*/
-    if (i_ovf <= 0) {
-        retval=(retval >> (- i_ovf)) & u_mask;
-    } else {
-        M4OSA_UInt32 u_nextword = (M4OSA_UInt32)GET_MEMORY32(
-            pStreamContext->mPbitStream[ pStreamContext->mIndex + 1 ]);
-        M4OSA_UInt32 u_msb_mask, u_msb_value, u_lsb_mask, u_lsb_value;
-
-        u_msb_mask = ((1 << (32 - pStreamContext->mBitIndex)) - 1) << i_ovf;
-        u_msb_value = retval << i_ovf;
-        u_lsb_mask = (1 << i_ovf) - 1;
-        u_lsb_value = u_nextword >> (32 - i_ovf);
-        retval= (u_msb_value & u_msb_mask ) | (u_lsb_value & u_lsb_mask);
-    }
-    /* return the bits...*/
-    return retval;
-}
-/**
- ************************************************************************
- * @brief   Increment the bitstream pointer of <length> bits.
- * @param   pContext    (IN/OUT) BitStreamParser context.
- * @param   length      (IN) The number of bit to shift the bitstream
- ************************************************************************
-*/
-static void VideoEditor3gpReader_BitStreamParserFlushBits(void* pContext,
-        M4OSA_Int32 length) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext=(
-        VideoEditor3gpReader_BitStreamParserContext*)pContext;
-    M4OSA_Int32 val;
-
-    if (M4OSA_NULL == pStreamContext) {
-        return;
-    }
-    val=pStreamContext->mBitIndex + length;
-    /* update the bits...*/
-    pStreamContext->mBitIndex += length;
-
-    if (val - 32 >= 0) {
-        /* update the bits...*/
-        pStreamContext->mBitIndex -= 32;
-        /* update the words*/
-        pStreamContext->mIndex++;
-    }
-}
-
-static M4OSA_UInt32 VideoEditor3gpReader_BitStreamParserGetBits(
-        void* pContext,M4OSA_Int32 bitPos, M4OSA_Int32 bitLength) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
-        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
-
-    M4OSA_Int32 bitLocation, bitIndex;
-    M4OSA_UInt32 retval=0;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0,
-        "VideoEditor3gpReader_BitStreamParserGetBits: invalid context pointer");
-
-    /* computes the word location*/
-    bitLocation=bitPos/32;
-    bitIndex=(bitPos) % 32;
-
-    if (bitLocation < pStreamContext->mSize) {
-        M4OSA_UInt32 u_mask;
-        M4OSA_Int32 i_ovf = bitIndex + bitLength - 32;
-        retval=(M4OSA_UInt32)GET_MEMORY32(
-            pStreamContext->mPbitStream[ bitLocation ]);
-
-        u_mask = (bitLength >= 32) ? 0xffffffff: (1 << bitLength) - 1;
-
-        if (i_ovf <= 0) {
-            retval=(retval >> (- i_ovf)) & u_mask;
-        } else {
-            M4OSA_UInt32 u_nextword = (M4OSA_UInt32)GET_MEMORY32(
-                pStreamContext->mPbitStream[ bitLocation + 1 ]);
-            M4OSA_UInt32 u_msb_mask, u_msb_value, u_lsb_mask, u_lsb_value;
-
-            u_msb_mask = ((1 << (32 - bitIndex)) - 1) << i_ovf;
-            u_msb_value = retval << i_ovf;
-            u_lsb_mask = (1 << i_ovf) - 1;
-            u_lsb_value = u_nextword >> (32 - i_ovf);
-            retval= (u_msb_value & u_msb_mask ) | (u_lsb_value & u_lsb_mask);
-        }
-    }
-    return retval;
-}
-
-static void VideoEditor3gpReader_BitStreamParserRestart(void* pContext) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
-        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
-
-    if (M4OSA_NULL == pStreamContext) {
-        return;
-    }
-    /* resets the bitstream pointers*/
-    pStreamContext->mIndex=0;
-    pStreamContext->mBitIndex=0;
-}
-/**
- *******************************************************************************
- * @brief  Get a pointer to the current byte pointed by the bitstream pointer.
- * @note   It should be used carefully as the pointer is in the bitstream itself
- *         and no copy is made.
- * @param  pContext    (IN/OUT)  BitStreamParser context.
- * @return Pointer to the current location in the bitstream
- *******************************************************************************
-*/
-static M4OSA_UInt8*  VideoEditor3gpReader_GetCurrentbitStreamPointer(
-        void* pContext) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
-        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
-    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0, "invalid context pointer");
-
-    return (M4OSA_UInt8*)((M4OSA_UInt8*)pStreamContext->mPbitStream + \
-        pStreamContext->mIndex * sizeof(M4OSA_UInt32) + \
-        pStreamContext->mBitIndex/8) ;
-}
-
-static M4OSA_Int32 VideoEditor3gpReader_BitStreamParserGetSize(void* pContext) {
-    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
-        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
-    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0, "invalid context pointer");
-
-    return pStreamContext->mSize;
-}
-
-
-static void VideoEditor3gpReader_MPEG4BitStreamParserInit(void** pContext,
-        void* pBitStream, M4OSA_Int32 size) {
-    VideoEditor3gpReader_BitStreamParserInit(pContext, pBitStream, size);
-}
-static M4OSA_Int32 VideoEditor3gpReader_GetMpegLengthFromInteger(void* pContext,
-        M4OSA_UInt32 val) {
-    M4OSA_UInt32 length=0;
-    M4OSA_UInt32 numBytes=0;
-    M4OSA_UInt32 b=0;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL==pContext), 0, "invalid context pointer");
-
-    /* the length is encoded as a sequence of bytes. The highest bit is used
-    to indicate that the length continues on the next byte.
-
-    The length can be: 0x80 0x80 0x80 0x22
-    of just            0x22 (highest bit not set)
-
-    */
-
-    do {
-        b=(val & ((0xff)<< (8 * numBytes)))>> (8 * numBytes);
-        length=(length << 7) | (b & 0x7f);
-        numBytes++;
-    } while ((b & 0x80) && numBytes < 4);
-
-    return length;
-}
-
-/**
- *******************************************************************************
- * @brief  Decode an MPEG4 Systems descriptor size from an encoded SDL size data
- * @note   The value is read from the current bitstream location.
- * @param  pContext    (IN/OUT)  BitStreamParser context.
- * @return Size in a human readable form
- *******************************************************************************
-*/
-static M4OSA_Int32 VideoEditor3gpReader_GetMpegLengthFromStream(void* pContext){
-    M4OSA_UInt32 length=0;
-    M4OSA_UInt32 numBytes=0;
-    M4OSA_UInt32 b=0;
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL==pContext), 0, "invalid context pointer");
-
-    /* the length is encoded as a sequence of bytes. The highest bit is used
-    to indicate that the length continues on the next byte.
-
-    The length can be: 0x80 0x80 0x80 0x22
-    of just            0x22 (highest bit not set)
-    */
-
-    do {
-        b=VideoEditor3gpReader_BitStreamParserShowBits(pContext, 8);
-        VideoEditor3gpReader_BitStreamParserFlushBits(pContext, 8);
-        length=(length << 7) | (b & 0x7f);
-        numBytes++;
-    } while ((b & 0x80) && numBytes < 4);
-
-    return length;
-}
-#endif /* VIDEOEDITOR_BITSTREAM_PARSER */
-/**
-************************************************************************
-* @brief    create an instance of the 3gp reader
- * @note    allocates the context
- *
- * @param   pContext:       (OUT)   pointer on a reader context
- *
- * @return  M4NO_ERROR              there is no error
- * @return  M4ERR_ALLOC             a memory allocation has failed
- * @return  M4ERR_PARAMETER         at least one parameter is not valid
-************************************************************************
-*/
-
-M4OSA_ERR VideoEditor3gpReader_create(M4OSA_Context *pContext) {
-    VideoEditor3gpReader_Context* pC = NULL;
-    M4OSA_ERR err = M4NO_ERROR;
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext , M4ERR_PARAMETER);
-
-    ALOGV("VideoEditor3gpReader_create begin");
-
-    /* Context allocation & initialization */
-    SAFE_MALLOC(pC, VideoEditor3gpReader_Context, 1, "VideoEditor3gpReader");
-
-    memset(pC, sizeof(VideoEditor3gpReader_Context), 0);
-
-    pC->mAudioStreamHandler  = M4OSA_NULL;
-    pC->mAudioAu.dataAddress = M4OSA_NULL;
-    pC->mVideoStreamHandler  = M4OSA_NULL;
-    pC->mVideoAu.dataAddress = M4OSA_NULL;
-
-    pC->mAudioSeeking = M4OSA_FALSE;
-    pC->mAudioSeekTime = 0;
-
-    pC->mVideoSeeking = M4OSA_FALSE;
-    pC->mVideoSeekTime = 0;
-
-    pC->mMaxDuration = 0;
-
-    *pContext=pC;
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditor3gpReader_create no error");
-    } else {
-        ALOGV("VideoEditor3gpReader_create ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditor3gpReader_create end ");
-    return err;
-}
-
-/**
-**************************************************************************
-* @brief    destroy the instance of the 3gp reader
-* @note after this call the context is invalid
-* @param    context:        (IN)    Context of the reader
-* @return   M4NO_ERROR              there is no error
-* @return   M4ERR_PARAMETER         pContext parameter is not properly set
-**************************************************************************
-*/
-
-M4OSA_ERR VideoEditor3gpReader_destroy(M4OSA_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditor3gpReader_Context* pC = M4OSA_NULL;
-
-    ALOGV("VideoEditor3gpReader_destroy begin");
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    pC = (VideoEditor3gpReader_Context*)pContext;
-
-    SAFE_FREE(pC->mAudioAu.dataAddress);
-    pC->mAudioAu.dataAddress = M4OSA_NULL;
-    SAFE_FREE(pC->mVideoAu.dataAddress);
-    pC->mVideoAu.dataAddress = M4OSA_NULL;
-    SAFE_FREE(pC);
-    pContext = M4OSA_NULL;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditor3gpReader_destroy no error");
-    }
-    else
-    {
-        ALOGV("VideoEditor3gpReader_destroy ERROR 0x%X", err);
-    }
-
-    ALOGV("VideoEditor3gpReader_destroy end ");
-    return err;
-}
-
-/**
-************************************************************************
-* @brief    open the reader and initializes its created instance
-* @note     this function open the media file
-* @param    context:            (IN)    Context of the reader
-* @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying
-*                                       the media to open
-* @return   M4NO_ERROR                  there is no error
-* @return   M4ERR_PARAMETER             the context is NULL
-* @return   M4ERR_UNSUPPORTED_MEDIA_TYPE
-*                                       the media is DRM protected
-************************************************************************
-*/
-
-M4OSA_ERR VideoEditor3gpReader_open(M4OSA_Context pContext,
-        M4OSA_Void* pFileDescriptor) {
-    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)pContext;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditor3gpReader_open start ");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),  M4ERR_PARAMETER,
-        "VideoEditor3gpReader_open: invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_open: invalid pointer pFileDescriptor");
-
-    ALOGV("VideoEditor3gpReader_open Datasource start %s",
-        (char*)pFileDescriptor);
-    //pC->mDataSource = DataSource::CreateFromURI((char*)pFileDescriptor);
-    pC->mDataSource = new FileSource ((char*)pFileDescriptor);
-
-    if (pC->mDataSource == NULL) {
-        ALOGV("VideoEditor3gpReader_open Datasource error");
-        return M4ERR_PARAMETER;
-    }
-
-    pC->mExtractor = MediaExtractor::Create(pC->mDataSource,
-        MEDIA_MIMETYPE_CONTAINER_MPEG4);
-
-    if (pC->mExtractor == NULL) {
-        ALOGV("VideoEditor3gpReader_open extractor error");
-        return M4ERR_PARAMETER;
-    }
-
-    int32_t isDRMProtected = 0;
-    sp<MetaData> meta = pC->mExtractor->getMetaData();
-    meta->findInt32(kKeyIsDRM, &isDRMProtected);
-    if (isDRMProtected) {
-        ALOGV("VideoEditorMp3Reader_open error - DRM Protected");
-        return M4ERR_UNSUPPORTED_MEDIA_TYPE;
-    }
-
-    ALOGV("VideoEditor3gpReader_open end ");
-    return err;
-}
-
-/**
-************************************************************************
-* @brief    close the reader
-* @note     close the 3GP file
-* @param    context:        (IN)    Context of the reader
-* @return   M4NO_ERROR              there is no error
-* @return   M4ERR_PARAMETER         the context is NULL
-* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
-************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_close(M4OSA_Context context) {
-    VideoEditor3gpReader_Context *pC = (VideoEditor3gpReader_Context*)context;
-    M4READER_AudioSbrUserdata *pAudioSbrUserData;
-    M4_AccessUnit *pAU;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditor3gpReader_close begin");
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_close: invalid context pointer");
-
-    if (pC->mAudioStreamHandler) {
-        ALOGV("VideoEditor3gpReader_close Audio");
-
-        if (M4OSA_NULL != pC->mAudioStreamHandler->m_pDecoderSpecificInfo) {
-            free(pC->mAudioStreamHandler->\
-                m_pDecoderSpecificInfo);
-            pC->mAudioStreamHandler->m_decoderSpecificInfoSize = 0;
-            pC->mAudioStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
-        }
-
-        if ((M4DA_StreamTypeAudioAac == pC->mAudioStreamHandler->m_streamType)
-            && (M4OSA_NULL != pC->mAudioStreamHandler->m_pUserData)) {
-            pAudioSbrUserData = (M4READER_AudioSbrUserdata*)(\
-                pC->mAudioStreamHandler->m_pUserData);
-
-            pAU = (M4_AccessUnit*)pAudioSbrUserData->m_pFirstAU;
-            if (M4OSA_NULL != pAU) {
-                free(pAU);
-            }
-
-            if (M4OSA_NULL != pAudioSbrUserData->m_pAacDecoderUserConfig) {
-                free(pAudioSbrUserData->\
-                    m_pAacDecoderUserConfig);
-            }
-            free(pAudioSbrUserData);
-            pC->mAudioStreamHandler->m_pUserData = M4OSA_NULL;
-        }
-
-        if (pC->mAudioStreamHandler->m_pESDSInfo != M4OSA_NULL) {
-            free(pC->mAudioStreamHandler->m_pESDSInfo);
-            pC->mAudioStreamHandler->m_pESDSInfo = M4OSA_NULL;
-            pC->mAudioStreamHandler->m_ESDSInfoSize = 0;
-        }
-        /* Finally destroy the stream handler */
-        free(pC->mAudioStreamHandler);
-        pC->mAudioStreamHandler = M4OSA_NULL;
-
-        pC->mAudioSource->stop();
-        pC->mAudioSource.clear();
-    }
-    if (pC->mVideoStreamHandler) {
-        ALOGV("VideoEditor3gpReader_close Video ");
-
-        if(M4OSA_NULL != pC->mVideoStreamHandler->m_pDecoderSpecificInfo) {
-            free(pC->mVideoStreamHandler->\
-                m_pDecoderSpecificInfo);
-            pC->mVideoStreamHandler->m_decoderSpecificInfoSize = 0;
-            pC->mVideoStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
-        }
-
-        if(M4OSA_NULL != pC->mVideoStreamHandler->m_pH264DecoderSpecificInfo) {
-            free(pC->mVideoStreamHandler->\
-                m_pH264DecoderSpecificInfo);
-            pC->mVideoStreamHandler->m_H264decoderSpecificInfoSize = 0;
-            pC->mVideoStreamHandler->m_pH264DecoderSpecificInfo = M4OSA_NULL;
-        }
-
-        if(pC->mVideoStreamHandler->m_pESDSInfo != M4OSA_NULL) {
-            free(pC->mVideoStreamHandler->m_pESDSInfo);
-            pC->mVideoStreamHandler->m_pESDSInfo = M4OSA_NULL;
-            pC->mVideoStreamHandler->m_ESDSInfoSize = 0;
-        }
-
-        /* Finally destroy the stream handler */
-        free(pC->mVideoStreamHandler);
-        pC->mVideoStreamHandler = M4OSA_NULL;
-
-        pC->mVideoSource->stop();
-        pC->mVideoSource.clear();
-    }
-    pC->mExtractor.clear();
-    pC->mDataSource.clear();
-
-    ALOGV("VideoEditor3gpReader_close end");
-    return err;
-}
-
-/**
-************************************************************************
-* @brief    get an option from the 3gp reader
-* @note     it allows the caller to retrieve a property value:
-*
-* @param    context:        (IN)    Context of the reader
-* @param    optionId:       (IN)    indicates the option to get
-* @param    pValue:         (OUT)   pointer to structure or value (allocated
-*                                   by user) where option is stored
-*
-* @return   M4NO_ERROR              there is no error
-* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
-* @return   M4ERR_PARAMETER         at least one parameter is not properly set
-* @return   M4ERR_BAD_OPTION_ID     when the option ID is not a valid one
-* @return   M4ERR_VIDEO_NOT_H263    No video stream H263 in file.
-* @return   M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET
-*           Function 3gpReader_getNextStreamHandler must be called before
-************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_getOption(M4OSA_Context context,
-        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
-    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditor3gpReader_getOption begin %d", optionId);
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getOption: invalid pointer on value");
-
-    switch (optionId) {
-    case M4READER_kOptionID_Duration:
-        {
-            ALOGV("VideoEditor3gpReader_getOption duration %d",pC->mMaxDuration);
-            *(M4OSA_Time*)pValue = pC->mMaxDuration;
-        }
-        break;
-    case M4READER_kOptionID_Version:
-        /* not used */
-        ALOGV("VideoEditor3gpReader_getOption: M4READER_kOptionID_Version");
-        break;
-
-    case M4READER_kOptionID_Copyright:
-        /* not used */
-        ALOGV(">>>>>>>   M4READER_kOptionID_Copyright");
-        break;
-
-    case M4READER_kOptionID_CreationTime:
-        /* not used */
-        ALOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_CreationTime");
-    break;
-
-    case M4READER_kOptionID_Bitrate:
-        {
-            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
-
-            if (pC->mMaxDuration != 0) {
-                M4OSA_UInt32 ui32Tmp = (M4OSA_UInt32)pC->mMaxDuration;
-                *pBitrate = (M4OSA_UInt32)(pC->mFileSize * 8000.0 / pC->mMaxDuration);
-            }
-            ALOGV("VideoEditor3gpReader_getOption bitrate %ld", *pBitrate);
-        }
-    break;
-    case M4READER_3GP_kOptionID_H263Properties:
-        {
-            if(M4OSA_NULL == pC->mVideoStreamHandler) {
-                ALOGV("VideoEditor3gpReader_getOption no videoStream retrieved");
-
-                err = M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET;
-                break;
-            }
-            if((M4DA_StreamTypeVideoH263 != pC->mVideoStreamHandler->\
-                m_streamType) || (pC->mVideoStreamHandler->\
-                m_decoderSpecificInfoSize < 7)) {
-                ALOGV("VideoEditor3gpReader_getOption DSI Size %d",
-                    pC->mVideoStreamHandler->m_decoderSpecificInfoSize);
-
-                err = M4ERR_VIDEO_NOT_H263;
-                break;
-            }
-
-            /* MAGICAL in the decoder confi H263: the 7th byte is the profile
-             * number, 6th byte is the level number */
-            ((M4READER_3GP_H263Properties *)pValue)->uiProfile =
-                pC->mVideoStreamHandler->m_pDecoderSpecificInfo[6];
-            ((M4READER_3GP_H263Properties *)pValue)->uiLevel =
-                pC->mVideoStreamHandler->m_pDecoderSpecificInfo[5];
-            ALOGV("VideoEditor3gpReader_getOption M4READER_3GP_kOptionID_\
-            H263Properties end");
-        }
-        break;
-    case M4READER_3GP_kOptionID_PurpleLabsDrm:
-        ALOGV("VideoEditor3gpReaderOption M4READER_3GP_kOptionID_PurpleLabsDrm");
-        /* not used */
-        break;
-
-    case M4READER_kOptionID_GetNumberOfAudioAu:
-        /* not used */
-        ALOGV("VideoEditor3gpReadeOption M4READER_kOptionID_GetNumberOfAudioAu");
-    break;
-
-    case M4READER_kOptionID_GetNumberOfVideoAu:
-        /* not used */
-        ALOGV("VideoEditor3gpReader_getOption :GetNumberOfVideoAu");
-    break;
-
-    case M4READER_kOptionID_GetMetadata:
-        /* not used */
-        ALOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_GetMetadata");
-    break;
-
-    case M4READER_kOptionID_3gpFtypBox:
-        /* used only for SEMC */
-        ALOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_3gpFtypBox");
-        err = M4ERR_BAD_OPTION_ID; //check this
-        break;
-
-#ifdef OPTIONID_GET_NEXT_VIDEO_CTS
-    case M4READER_3GP_kOptionID_getNextVideoCTS:
-        /* not used */
-        ALOGV("VideoEditor3gpReader_getOption: getNextVideoCTS");
-        break;
-#endif
-    default:
-        {
-            err = M4ERR_BAD_OPTION_ID;
-            ALOGV("VideoEditor3gpReader_getOption M4ERR_BAD_OPTION_ID");
-        }
-        break;
-    }
-    ALOGV("VideoEditor3gpReader_getOption end: optionID: x%x", optionId);
-    return err;
-}
-/**
-************************************************************************
-* @brief    set an option on the 3gp reader
-* @note No option can be set yet.
-* @param    context:        (IN)    Context of the reader
-* @param    optionId:       (IN)    indicates the option to set
-* @param    pValue:         (IN)    pointer to structure or value (allocated
-*                                   by user) where option is stored
-* @return   M4NO_ERROR              there is no error
-* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
-* @return   M4ERR_PARAMETER         at least one parameter is not properly set
-* @return   M4ERR_BAD_OPTION_ID     when the option ID is not a valid one
-************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_setOption(M4OSA_Context context,
-        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
-    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
-        "invalid value pointer");
-
-    ALOGV("VideoEditor3gpReader_setOption begin %d",optionId);
-
-    switch(optionId) {
-        case M4READER_kOptionID_SetOsaFileReaderFctsPtr:
-        break;
-
-        case static_cast<M4OSA_OptionID>(M4READER_3GP_kOptionID_AudioOnly):
-        break;
-
-        case static_cast<M4OSA_OptionID>(M4READER_3GP_kOptionID_VideoOnly):
-        break;
-
-        case static_cast<M4OSA_OptionID>(M4READER_3GP_kOptionID_FastOpenMode):
-        break;
-
-        case static_cast<M4OSA_OptionID>(M4READER_kOptionID_MaxMetadataSize):
-        break;
-
-        default:
-        {
-            ALOGV("VideoEditor3gpReader_setOption: returns M4ERR_BAD_OPTION_ID");
-            err = M4ERR_BAD_OPTION_ID;
-        }
-        break;
-    }
-    ALOGV("VideoEditor3gpReader_setOption end ");
-    return err;
-}
-/**
- ************************************************************************
- * @brief   fill the access unit structure with initialization values
- * @param   context:        (IN)     Context of the reader
- * @param   pStreamHandler: (IN)     pointer to the stream handler to which
- *                                   the access unit will be associated
- * @param   pAccessUnit:    (IN/OUT) pointer to the access unit (allocated
- *                                   by the caller) to initialize
- * @return  M4NO_ERROR               there is no error
- * @return  M4ERR_PARAMETER          at least one parameter is not properly set
- ************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_fillAuStruct(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
-    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err= M4NO_ERROR;
-
-    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
-        "VideoEditor3gpReader_fillAuStruct: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_fillAuStruc invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-        "VideoEditor3gpReader_fillAuStruct: invalid pointer to M4_AccessUnit");
-
-    ALOGV("VideoEditor3gpReader_fillAuStruct begin");
-
-    /* Initialize pAccessUnit structure */
-    pAccessUnit->m_size         = 0;
-    pAccessUnit->m_CTS          = 0;
-    pAccessUnit->m_DTS          = 0;
-    pAccessUnit->m_attribute    = 0;
-    pAccessUnit->m_dataAddress  = M4OSA_NULL;
-    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
-    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
-    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
-
-    ALOGV("VideoEditor3gpReader_fillAuStruct end");
-    return M4NO_ERROR;
-}
-
-/**
-********************************************************************************
-* @brief    jump into the stream at the specified time
-* @note
-* @param    context:        (IN)   Context of the reader
-* @param    pStreamHandler  (IN)   the stream handler of the stream to make jump
-* @param    pTime           (I/O)IN  the time to jump to (in ms)
-*                                OUT the time to which the stream really jumped
-* @return   M4NO_ERROR             there is no error
-* @return   M4ERR_PARAMETER        at least one parameter is not properly set
-********************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_jump(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime) {
-    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_AccessUnit* pAu;
-    M4OSA_Time time64;
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_jump: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_jump: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_jump: invalid time pointer");
-
-    ALOGV("VideoEditor3gpReader_jump begin");
-
-    if (*pTime == (pStreamHandler->m_duration)) {
-        *pTime -= 1;
-    }
-    time64 = (M4OSA_Time)*pTime;
-
-    ALOGV("VideoEditor3gpReader_jump time us %ld ", time64);
-
-    if ((pC->mAudioStreamHandler != M4OSA_NULL) &&
-            (pStreamHandler->m_streamId == pC->mAudioStreamHandler->m_streamId))
-            {
-        pAu = &pC->mAudioAu;
-        pAu->CTS = time64;
-        pAu->DTS = time64;
-
-        time64 = time64 * 1000; /* Convert the time into micro sec */
-        pC->mAudioSeeking = M4OSA_TRUE;
-        pC->mAudioSeekTime = time64;
-        ALOGV("VideoEditor3gpReader_jump AUDIO time us %ld ", time64);
-    } else if ((pC->mVideoStreamHandler != M4OSA_NULL) &&
-            (pStreamHandler->m_streamId == pC->mVideoStreamHandler->m_streamId))
-            {
-        pAu = &pC->mVideoAu;
-        pAu->CTS = time64;
-        pAu->DTS = time64;
-
-        time64 = time64 * 1000; /* Convert the time into micro sec */
-        pC->mVideoSeeking = M4OSA_TRUE;
-        pC->mVideoSeekTime = time64;
-        ALOGV("VideoEditor3gpReader_jump VIDEO time us %ld ", time64);
-    } else {
-        ALOGV("VideoEditor3gpReader_jump passed StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-    time64 = time64 / 1000; /* Convert the time into milli sec */
-    ALOGV("VideoEditor3gpReader_jump time ms before seekset %ld ", time64);
-
-    *pTime = (M4OSA_Int32)time64;
-
-    ALOGV("VideoEditor3gpReader_jump end");
-    err = M4NO_ERROR;
-    return err;
-}
-/**
-********************************************************************************
-* @brief    reset the stream, that is seek it to beginning and make it ready
-* @note
-* @param    context:        (IN)    Context of the reader
-* @param    pStreamHandler  (IN)    The stream handler of the stream to reset
-* @return   M4NO_ERROR              there is no error
-* @return   M4ERR_PARAMETER         at least one parameter is not properly set
-********************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_reset(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler) {
-    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_StreamID streamIdArray[2];
-    M4SYS_AccessUnit* pAu;
-    M4OSA_Time time64 = 0;
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_reset: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_reset: invalid pointer to M4_StreamHandler");
-
-    ALOGV("VideoEditor3gpReader_reset begin");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
-        pAu = &pC->mAudioAu;
-    } else if (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) {
-        pAu = &pC->mVideoAu;
-    } else {
-        ALOGV("VideoEditor3gpReader_reset passed StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-
-    pAu->CTS = time64;
-    pAu->DTS = time64;
-
-    ALOGV("VideoEditor3gpReader_reset end");
-    return err;
-}
-
-/**
-********************************************************************************
-* @brief  Gets an access unit (AU) from the stream handler source.
-* @note   An AU is the smallest possible amount of data to be decoded by decoder
-*
-* @param    context:        (IN) Context of the reader
-* @param    pStreamHandler  (IN) The stream handler of the stream to make jump
-* @param    pAccessUnit     (IO) Pointer to access unit to fill with read data
-* @return   M4NO_ERROR           there is no error
-* @return   M4ERR_PARAMETER      at least one parameter is not properly set
-* @returns  M4ERR_ALLOC          memory allocation failed
-* @returns  M4WAR_NO_MORE_AU     there are no more access unit in the stream
-********************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_getNextAu(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
-    VideoEditor3gpReader_Context* pC=(VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_AccessUnit* pAu;
-    int64_t tempTime64 = 0;
-    MediaBuffer *mMediaBuffer = NULL;
-    MediaSource::ReadOptions options;
-    M4OSA_Bool flag = M4OSA_FALSE;
-    status_t error;
-    int32_t i32Tmp = 0;
-
-    M4OSA_DEBUG_IF1(( pC== 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getNextAu: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getNextAu: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getNextAu: invalid pointer to M4_AccessUnit");
-
-    ALOGV("VideoEditor3gpReader_getNextAu begin");
-
-    if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
-        ALOGV("VideoEditor3gpReader_getNextAu audio stream");
-        pAu = &pC->mAudioAu;
-        if (pC->mAudioSeeking == M4OSA_TRUE) {
-            ALOGV("VideoEditor3gpReader_getNextAu audio seek time: %ld",
-                pC->mAudioSeekTime);
-            options.setSeekTo(pC->mAudioSeekTime);
-            pC->mAudioSource->read(&mMediaBuffer, &options);
-
-            mMediaBuffer->meta_data()->findInt64(kKeyTime,
-                (int64_t*)&tempTime64);
-            options.clearSeekTo();
-            pC->mAudioSeeking = M4OSA_FALSE;
-            flag = M4OSA_TRUE;
-        } else {
-            ALOGV("VideoEditor3gpReader_getNextAu audio no seek:");
-            pC->mAudioSource->read(&mMediaBuffer, &options);
-            if (mMediaBuffer != NULL) {
-                mMediaBuffer->meta_data()->findInt64(kKeyTime,
-                    (int64_t*)&tempTime64);
-            }
-        }
-    } else if (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) {
-        ALOGV("VideoEditor3gpReader_getNextAu video steram ");
-        pAu = &pC->mVideoAu;
-        if(pC->mVideoSeeking == M4OSA_TRUE) {
-            flag = M4OSA_TRUE;
-            ALOGV("VideoEditor3gpReader_getNextAu seek: %ld",pC->mVideoSeekTime);
-            options.setSeekTo(pC->mVideoSeekTime,
-                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-            do
-            {
-                if (mMediaBuffer != NULL) {
-                    ALOGV("VideoEditor3gpReader_getNextAu free the MediaBuffer");
-                    mMediaBuffer->release();
-                }
-                error = pC->mVideoSource->read(&mMediaBuffer, &options);
-                ALOGV("VE3gpReader_getNextAu MediaBuffer %x , error %d",
-                    mMediaBuffer, error);
-                if (mMediaBuffer != NULL)
-                {
-                    if (mMediaBuffer->meta_data()->findInt32(kKeyIsSyncFrame,
-                        &i32Tmp) && i32Tmp) {
-                            ALOGV("SYNC FRAME FOUND--%d", i32Tmp);
-                        pAu->attribute = AU_RAP;
-                    }
-                    else {
-                        pAu->attribute = AU_P_Frame;
-                    }
-                    mMediaBuffer->meta_data()->findInt64(kKeyTime,
-                        (int64_t*)&tempTime64);
-                } else {
-                    break;
-                }
-                options.clearSeekTo();
-            } while(tempTime64 < pC->mVideoSeekTime);
-
-            ALOGV("VE3gpReader_getNextAu: video  time with seek  = %lld:",
-                tempTime64);
-            pC->mVideoSeeking = M4OSA_FALSE;
-        } else {
-            ALOGV("VideoEditor3gpReader_getNextAu video no seek:");
-            pC->mVideoSource->read(&mMediaBuffer, &options);
-
-            if(mMediaBuffer != NULL) {
-                if (mMediaBuffer->meta_data()->findInt32(kKeyIsSyncFrame,
-                    &i32Tmp) && i32Tmp) {
-                    ALOGV("SYNC FRAME FOUND--%d", i32Tmp);
-                    pAu->attribute = AU_RAP;
-                }
-                else {
-                    pAu->attribute = AU_P_Frame;
-                }
-                mMediaBuffer->meta_data()->findInt64(kKeyTime,
-                    (int64_t*)&tempTime64);
-                ALOGV("VE3gpReader_getNextAu: video no seek time = %lld:",
-                    tempTime64);
-            }else {
-                ALOGV("VE3gpReader_getNextAu:video no seek time buffer is NULL");
-            }
-        }
-    } else {
-        ALOGV("VideoEditor3gpReader_getNextAu M4ERR_PARAMETER");
-        return M4ERR_PARAMETER;
-    }
-
-    if (mMediaBuffer != NULL) {
-        if( (pAu->dataAddress == NULL) ||  (pAu->size < \
-            mMediaBuffer->range_length())) {
-            if(pAu->dataAddress != NULL) {
-                free((M4OSA_Int32*)pAu->dataAddress);
-                pAu->dataAddress = NULL;
-            }
-            ALOGV("Buffer lenght = %d ,%d",(mMediaBuffer->range_length() +\
-                3) & ~0x3,(mMediaBuffer->range_length()));
-
-            pAu->dataAddress = (M4OSA_Int32*)M4OSA_32bitAlignedMalloc(
-                (mMediaBuffer->range_length() + 3) & ~0x3,M4READER_3GP,
-                    (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
-            if(pAu->dataAddress == NULL) {
-                ALOGV("VideoEditor3gpReader_getNextAu malloc failed");
-                return M4ERR_ALLOC;
-            }
-        }
-        pAu->size = mMediaBuffer->range_length();
-
-        memcpy((void *)pAu->dataAddress,
-            (void *)((const char *)mMediaBuffer->data() + mMediaBuffer->range_offset()),
-            mMediaBuffer->range_length());
-
-        if( (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler)  &&
-            (pStreamHandler->m_streamType == M4DA_StreamTypeVideoMpeg4Avc) ) {
-            M4OSA_UInt32 size = mMediaBuffer->range_length();
-            M4OSA_UInt8 *lbuffer;
-
-            lbuffer = (M4OSA_UInt8 *) pAu->dataAddress;
-            ALOGV("pAccessUnit->m_dataAddress size = %x",size);
-
-            lbuffer[0] = (size >> 24) & 0xFF;
-            lbuffer[1] = (size >> 16) & 0xFF;
-            lbuffer[2] = (size >> 8) & 0xFF;
-            lbuffer[3] = (size) & 0xFF;
-        }
-
-        pAu->CTS = tempTime64;
-
-        pAu->CTS = pAu->CTS / 1000; //converting the microsec to millisec
-        ALOGV("VideoEditor3gpReader_getNextAu CTS = %ld",pAu->CTS);
-
-        pAu->DTS  = pAu->CTS;
-        if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
-            pAu->attribute = M4SYS_kFragAttrOk;
-        }
-        mMediaBuffer->release();
-
-        pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
-        pAccessUnit->m_size = pAu->size;
-        pAccessUnit->m_maxsize = pAu->size;
-        pAccessUnit->m_CTS = pAu->CTS;
-        pAccessUnit->m_DTS = pAu->DTS;
-        pAccessUnit->m_attribute = pAu->attribute;
-
-    } else {
-        ALOGV("VideoEditor3gpReader_getNextAu: M4WAR_NO_MORE_AU (EOS) reached");
-        pAccessUnit->m_size = 0;
-        err = M4WAR_NO_MORE_AU;
-    }
-    options.clearSeekTo();
-
-    pAu->nbFrag = 0;
-    mMediaBuffer = NULL;
-    ALOGV("VideoEditor3gpReader_getNextAu end ");
-
-    return err;
-}
-/**
- *******************************************************************************
- * @brief   Split the AVC DSI in its different components and write it in
- *          ONE memory buffer
- * @note
- * @param   pStreamHandler:         (IN/OUT) The MPEG4-AVC stream
- * @param   pDecoderConfigLocal:    (IN) The DSI buffer
- * @param   decoderConfigSizeLocal: (IN) The DSI buffer size
- * @return  M4NO_ERROR              there is no error
- * @return  ERR_FILE_SYNTAX_ERROR   pDecoderConfigLocal is NULL
- *******************************************************************************
-*/
-static M4OSA_ERR VideoEditor3gpReader_AnalyseAvcDsi(
-        M4_StreamHandler *pStreamHandler, M4OSA_Int32* pDecoderConfigLocal,
-        M4OSA_Int32 decoderConfigSizeLocal) {
-    struct _avcSpecificInfo *pAvcSpecInfo = M4OSA_NULL;
-    M4OSA_UInt32 uiSpecInfoSize;
-    M4OSA_Context pBitParserContext = M4OSA_NULL;
-    M4OSA_MemAddr8 pPos;
-
-    /**
-     * First parsing to get the total allocation size (we must not do
-     * multiple malloc, but only one instead) */
-    {
-        M4OSA_Int32 val;
-        M4OSA_UInt32 i,j;
-        M4OSA_UInt8 nalUnitLength;
-        M4OSA_UInt8  numOfSequenceParameterSets;
-        M4OSA_UInt32 uiTotalSizeOfSPS = 0;
-        M4OSA_UInt8  numOfPictureParameterSets;
-        M4OSA_UInt32 uiTotalSizeOfPPS = 0;
-        M4OSA_UInt32 uiSize;
-        struct _avcSpecificInfo avcSpIf;
-
-        avcSpIf.m_nalUnitLength = 0;
-
-        if (M4OSA_NULL == pDecoderConfigLocal) {
-            return M4ERR_READER3GP_DECODER_CONFIG_ERROR;
-        }
-
-        VideoEditor3gpReader_MPEG4BitStreamParserInit(&pBitParserContext,
-            pDecoderConfigLocal, decoderConfigSizeLocal);
-
-        if (M4OSA_NULL == pBitParserContext) {
-            return M4ERR_ALLOC;
-        }
-
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-                                       /* 8 bits -- configuration version */
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-                                       /* 8 bits -- avc profile indication*/
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-                                       /* 8 bits -- profile compatibility */
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-                                       /* 8 bits -- avc level indication*/
-        val=VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext, 8);
-                       /* 6 bits reserved 111111b 2 bits length Size minus one*/
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-                                       /* m_nalUnitLength */
-
-        nalUnitLength = (M4OSA_UInt8)((val & 0x03) + 1);/*0b11111100*/
-        if (nalUnitLength > 4) {
-            pStreamHandler->m_decoderSpecificInfoSize = 0;
-            pStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
-            VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
-        } else {
-            /**
-             * SPS table */
-            val=VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext,
-            8);/* 3 bits-reserved 111b-5 bits number of sequence parameter set*/
-            numOfSequenceParameterSets = val & 0x1F;
-            /*1F instead of E0*/ /*0b11100000*/ /*Number of seq parameter sets*/
-            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            for (i=0; i < numOfSequenceParameterSets; i++) {
-                /**
-                 * Get the size of this element */
-                uiSize =
-                    (M4OSA_UInt32)VideoEditor3gpReader_BitStreamParserShowBits(
-                    pBitParserContext, 16);
-                uiTotalSizeOfSPS += uiSize;
-                VideoEditor3gpReader_BitStreamParserFlushBits(
-                    pBitParserContext, 16);
-                /**
-                 *Read the element(dont keep it, we only want size right now) */
-                for (j=0; j<uiSize; j++) {
-                    VideoEditor3gpReader_BitStreamParserFlushBits(
-                        pBitParserContext, 8);
-                }
-            }
-
-            /**
-             * SPS table */
-            numOfPictureParameterSets=(M4OSA_UInt8)\
-                VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext,
-                    8);
-            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            for (i=0; i < numOfPictureParameterSets; i++) {
-                /**
-                 * Get the size of this element */
-                uiSize = (M4OSA_UInt32)
-                    VideoEditor3gpReader_BitStreamParserShowBits(
-                    pBitParserContext, 16);
-                uiTotalSizeOfPPS += uiSize;
-                VideoEditor3gpReader_BitStreamParserFlushBits(
-                    pBitParserContext, 16);
-                /**
-                 *Read the element(dont keep it,we only want size right now)*/
-                for (j=0; j<uiSize; j++) {
-                    VideoEditor3gpReader_BitStreamParserFlushBits(
-                        pBitParserContext, 8);
-                }
-            }
-
-            /**
-             * Compute the size of the full buffer */
-            uiSpecInfoSize = sizeof(struct _avcSpecificInfo) +
-                     numOfSequenceParameterSets * sizeof(struct _parameterSet)
-                     + /**< size of the table of SPS elements */
-                     numOfPictureParameterSets  * sizeof(struct _parameterSet)
-                     + /**< size of the table of PPS elements */
-                     uiTotalSizeOfSPS +
-                     uiTotalSizeOfPPS;
-            /**
-             * Allocate the buffer */
-            pAvcSpecInfo =(struct _avcSpecificInfo*)M4OSA_32bitAlignedMalloc(uiSpecInfoSize,
-                M4READER_3GP, (M4OSA_Char*)"MPEG-4 AVC DecoderSpecific");
-            if (M4OSA_NULL == pAvcSpecInfo) {
-                VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
-                return M4ERR_ALLOC;
-            }
-
-            /**
-             * Set the pointers to the correct part of the buffer */
-            pAvcSpecInfo->m_nalUnitLength = nalUnitLength;
-            pAvcSpecInfo->m_numOfSequenceParameterSets =
-                numOfSequenceParameterSets;
-            pAvcSpecInfo->m_numOfPictureParameterSets  =
-                numOfPictureParameterSets;
-
-            /* We place the SPS param sets table after m_pPictureParameterSet */
-            pAvcSpecInfo->m_pSequenceParameterSet= (struct _parameterSet*)(
-                (M4OSA_MemAddr8)(&pAvcSpecInfo->m_pPictureParameterSet) +
-                sizeof(pAvcSpecInfo->m_pPictureParameterSet));
-            /*We place the PPS param sets table after the SPS param sets table*/
-            pAvcSpecInfo->m_pPictureParameterSet = (struct _parameterSet*)(
-                (M4OSA_MemAddr8)(pAvcSpecInfo->m_pSequenceParameterSet) +
-                (numOfSequenceParameterSets * sizeof(struct _parameterSet)));
-            /**< The data will be placed after the PPS param sets table */
-            pPos = (M4OSA_MemAddr8)pAvcSpecInfo->m_pPictureParameterSet +
-                (numOfPictureParameterSets * sizeof(struct _parameterSet));
-
-            /**
-             * reset the bit parser */
-            VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
-        }
-    }
-
-    /**
-     * Second parsing to copy the data */
-    if (M4OSA_NULL != pAvcSpecInfo) {
-        M4OSA_Int32 i,j;
-
-        VideoEditor3gpReader_MPEG4BitStreamParserInit(&pBitParserContext,
-            pDecoderConfigLocal, decoderConfigSizeLocal);
-
-        if (M4OSA_NULL == pBitParserContext) {
-            free(pAvcSpecInfo);
-            return M4ERR_ALLOC;
-        }
-
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            /* 8 bits -- configuration version */
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            /* 8 bits -- avc profile indication*/
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            /* 8 bits -- profile compatibility */
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            /* 8 bits -- avc level indication*/
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            /* m_nalUnitLength */
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-        /* 3 bits -- reserved 111b -- 5 bits number of sequence parameter set*/
-
-        for (i=0; i < pAvcSpecInfo->m_numOfSequenceParameterSets; i++) {
-            pAvcSpecInfo->m_pSequenceParameterSet[i].m_length =
-                (M4OSA_UInt16)VideoEditor3gpReader_BitStreamParserShowBits(
-                pBitParserContext, 16);
-            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext,16);
-
-            pAvcSpecInfo->m_pSequenceParameterSet[i].m_pParameterSetUnit =
-                (M4OSA_UInt8*)pPos;  /**< current position in the buffer */
-            pPos += pAvcSpecInfo->m_pSequenceParameterSet[i].m_length;
-                /**< increment the position in the buffer */
-            for (j=0; j<pAvcSpecInfo->m_pSequenceParameterSet[i].m_length;j++){
-                pAvcSpecInfo->m_pSequenceParameterSet[i].m_pParameterSetUnit[j]=
-                    (M4OSA_UInt8)VideoEditor3gpReader_BitStreamParserShowBits(
-                    pBitParserContext, 8);
-                VideoEditor3gpReader_BitStreamParserFlushBits(
-                    pBitParserContext, 8);
-            }
-        }
-
-        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
-            /* number of pîcture parameter set*/
-
-        for (i=0; i < pAvcSpecInfo->m_numOfPictureParameterSets; i++) {
-            pAvcSpecInfo->m_pPictureParameterSet[i].m_length =
-                (M4OSA_UInt16)VideoEditor3gpReader_BitStreamParserShowBits(
-                pBitParserContext, 16);
-            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext,16);
-
-            pAvcSpecInfo->m_pPictureParameterSet[i].m_pParameterSetUnit =
-                (M4OSA_UInt8*)pPos;   /**< current position in the buffer */
-            pPos += pAvcSpecInfo->m_pPictureParameterSet[i].m_length;
-                /**< increment the position in the buffer */
-            for (j=0; j<pAvcSpecInfo->m_pPictureParameterSet[i].m_length; j++) {
-                pAvcSpecInfo->m_pPictureParameterSet[i].m_pParameterSetUnit[j] =
-                    (M4OSA_UInt8)VideoEditor3gpReader_BitStreamParserShowBits(
-                    pBitParserContext, 8);
-                VideoEditor3gpReader_BitStreamParserFlushBits(
-                    pBitParserContext, 8);
-            }
-        }
-        VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
-        pStreamHandler->m_decoderSpecificInfoSize = uiSpecInfoSize;
-        pStreamHandler->m_pDecoderSpecificInfo = (M4OSA_UInt8*)pAvcSpecInfo;
-    }
-    pStreamHandler->m_H264decoderSpecificInfoSize  =  decoderConfigSizeLocal;
-    pStreamHandler->m_pH264DecoderSpecificInfo  = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-        decoderConfigSizeLocal, M4READER_3GP,
-        (M4OSA_Char*)"MPEG-4 AVC DecoderSpecific");
-    if (M4OSA_NULL == pStreamHandler->m_pH264DecoderSpecificInfo) {
-        goto cleanup;
-    }
-
-    memcpy((void * ) pStreamHandler->m_pH264DecoderSpecificInfo,
-        (void * )pDecoderConfigLocal,
-        pStreamHandler->m_H264decoderSpecificInfoSize);
-    return M4NO_ERROR;
-cleanup:
-    VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
-    return M4ERR_READER3GP_DECODER_CONFIG_ERROR;
-}
-/**
-********************************************************************************
-* @brief    Get the next stream found in the 3gp file
-* @note
-* @param    context:     (IN)    Context of the reader
-* @param    pMediaFamily: OUT)   pointer to a user allocated
-*                                M4READER_MediaFamily that will be filled
-*                                with the media family of the found stream
-* @param    pStreamHandler:(OUT) pointer to StreamHandler that will be allocated
-*                                and filled with the found stream description
-* @return   M4NO_ERROR              there is no error
-* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
-* @return   M4ERR_PARAMETER         at least one parameter is not properly set
-* @return   M4WAR_NO_MORE_STREAM    no more available stream in the media
-********************************************************************************
-*/
-M4OSA_ERR VideoEditor3gpReader_getNextStreamHandler(M4OSA_Context context,
-        M4READER_MediaFamily *pMediaFamily,
-        M4_StreamHandler **pStreamHandler) {
-    VideoEditor3gpReader_Context* pC=(VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_StreamID streamIdArray[2];
-    M4SYS_StreamDescription streamDesc;
-    M4_AudioStreamHandler* pAudioStreamHandler;
-    M4_VideoStreamHandler* pVideoStreamHandler;
-    M4OSA_Int8 *DecoderSpecificInfo = M4OSA_NULL;
-    M4OSA_Int32 decoderSpecificInfoSize =0, maxAUSize = 0;
-
-    M4_StreamType streamType = M4DA_StreamTypeUnknown;
-    M4OSA_UInt8 temp, i, trackCount;
-    M4OSA_Bool haveAudio = M4OSA_FALSE;
-    M4OSA_Bool haveVideo = M4OSA_FALSE;
-    sp<MetaData> meta  = NULL;
-    int64_t Duration = 0;
-    M4OSA_UInt8* DecoderSpecific = M4OSA_NULL ;
-    uint32_t type;
-    const void *data;
-    size_t size;
-    const void *codec_specific_data;
-    size_t codec_specific_data_size;
-    M4OSA_Int32  ptempTime;
-    M4OSA_Int32  avgFPS=0;
-
-    ALOGV("VideoEditor3gpReader_getNextStreamHandler begin");
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getNextStreamHandler: invalid context");
-    M4OSA_DEBUG_IF1((pMediaFamily   == 0), M4ERR_PARAMETER,
-        "getNextStreamHandler: invalid pointer to MediaFamily");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "getNextStreamHandler: invalid pointer to StreamHandler");
-
-    trackCount = pC->mExtractor->countTracks();
-    temp = pC->mCurrTrack;
-
-    if(temp >= trackCount) {
-        ALOGV("VideoEditor3gpReader_getNextStreamHandler error = %d",
-            M4WAR_NO_MORE_STREAM);
-        return (M4WAR_NO_MORE_STREAM);
-    } else {
-        const char *mime;
-        meta = pC->mExtractor->getTrackMetaData(temp);
-        CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
-            pC->mVideoSource = pC->mExtractor->getTrack(temp);
-            pC->mVideoSource->start();
-
-            *pMediaFamily = M4READER_kMediaFamilyVideo;
-            haveVideo = true;
-            ALOGV("VideoEditor3gpReader_getNextStreamHandler getTrack called");
-            if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-                streamType = M4DA_StreamTypeVideoMpeg4Avc;
-            } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_H263)) {
-                streamType = M4DA_StreamTypeVideoH263;
-            } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
-                streamType = M4DA_StreamTypeVideoMpeg4;
-            } else {
-                ALOGV("VideoEditor3gpReaderGetNextStreamHandler streamTypeNONE");
-            }
-            ALOGV("VideoEditor3gpReader_getNextStreamHandler: stream type: %d ",
-                streamType);
-
-            if(streamType != M4DA_StreamTypeUnknown) {
-                pC->mStreamType = streamType;
-                pC->mStreamId = pC->mCurrTrack;
-
-                pVideoStreamHandler = (M4_VideoStreamHandler*)M4OSA_32bitAlignedMalloc
-                    (sizeof(M4_VideoStreamHandler), M4READER_3GP,
-                    (M4OSA_Char*)"M4_VideoStreamHandler");
-                if (M4OSA_NULL == pVideoStreamHandler) {
-                    return M4ERR_ALLOC;
-                }
-                pVideoStreamHandler->m_structSize=sizeof(M4_VideoStreamHandler);
-
-                meta->findInt32(kKeyWidth,
-                    (int32_t*)&(pVideoStreamHandler->m_videoWidth));
-                meta->findInt32(kKeyHeight,
-                    (int32_t*)&(pVideoStreamHandler->m_videoHeight));
-
-                (*pStreamHandler)  = (M4_StreamHandler*)(pVideoStreamHandler);
-                meta->findInt64(kKeyDuration, (int64_t*)&(Duration));
-                ((*pStreamHandler)->m_duration) = (int32_t)((Duration)/1000); // conversion to mS
-                pC->mMaxDuration = ((*pStreamHandler)->m_duration);
-                if (pC->mMaxDuration == 0) {
-                    ALOGE("Video is too short: %lld Us", Duration);
-                    delete pVideoStreamHandler;
-                    pVideoStreamHandler = NULL;
-                    return M4ERR_PARAMETER;
-                }
-                ALOGV("VideoEditor3gpReader_getNextStreamHandler m_duration %d",
-                    (*pStreamHandler)->m_duration);
-
-                off64_t fileSize = 0;
-                pC->mDataSource->getSize(&fileSize);
-                pC->mFileSize  = fileSize;
-
-                ALOGV("VideoEditor3gpReader_getNextStreamHandler m_fileSize %d",
-                    pC->mFileSize);
-
-                meta->findInt32(kKeyMaxInputSize, (int32_t*)&(maxAUSize));
-                if(maxAUSize == 0) {
-                    maxAUSize = 70000;
-                }
-                (*pStreamHandler)->m_maxAUSize = maxAUSize;
-                ALOGV("<<<<<<<<<<   video: mMaxAUSize from MP4 extractor: %d",
-                    (*pStreamHandler)->m_maxAUSize);
-
-                ((M4_StreamHandler*)pVideoStreamHandler)->m_averageBitRate =
-                        (pC->mFileSize * 8000)/pC->mMaxDuration;
-                ALOGV("VideoEditor3gpReader_getNextStreamHandler m_averageBitrate %d",
-                    ((M4_StreamHandler*)pVideoStreamHandler)->m_averageBitRate);
-
-
-                meta->findInt32(kKeyFrameRate,
-                    (int32_t*)&(avgFPS));
-                ALOGV("<<<<<<<<<<   video: Average FPS from MP4 extractor: %d",
-                    avgFPS);
-
-                pVideoStreamHandler->m_averageFrameRate =(M4OSA_Float) avgFPS;
-                ALOGV("<<<<<<<<<<   video: Average FPS from MP4 extractor in FLOAT: %f",
-                    pVideoStreamHandler->m_averageFrameRate);
-
-                // Get the video rotation degree
-                int32_t rotationDegree;
-                if(!meta->findInt32(kKeyRotation, &rotationDegree)) {
-                    rotationDegree = 0;
-                }
-                pVideoStreamHandler->videoRotationDegrees = rotationDegree;
-
-                pC->mVideoStreamHandler =
-                    (M4_StreamHandler*)(pVideoStreamHandler);
-
-                /* Get the DSI info */
-                if(M4DA_StreamTypeVideoH263 == streamType) {
-                    if (meta->findData(kKeyD263, &type, &data, &size)) {
-                        (*pStreamHandler)->m_decoderSpecificInfoSize = size;
-                        if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
-                            DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                                (*pStreamHandler)->m_decoderSpecificInfoSize,
-                                M4READER_3GP,(M4OSA_Char*)"H263 DSI");
-                            if (M4OSA_NULL == DecoderSpecific) {
-                                return M4ERR_ALLOC;
-                            }
-                            memcpy((void *)DecoderSpecific,
-                                (void *)data, size);
-                            (*pStreamHandler)->m_pDecoderSpecificInfo =
-                                DecoderSpecific;
-                        }
-                        else {
-                            (*pStreamHandler)->m_pDecoderSpecificInfo =
-                                M4OSA_NULL;
-                            (*pStreamHandler)->m_decoderSpecificInfoSize = 0;
-                        }
-                        (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
-                        (*pStreamHandler)->m_ESDSInfoSize = 0;
-                        (*pStreamHandler)->m_pH264DecoderSpecificInfo = M4OSA_NULL;
-                        (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
-                    } else {
-                        ALOGV("VE_getNextStreamHandler: H263 dsi not found");
-                        (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
-                        (*pStreamHandler)->m_decoderSpecificInfoSize = 0;
-                        (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
-                        (*pStreamHandler)->m_pH264DecoderSpecificInfo =
-                            M4OSA_NULL;
-                        (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
-                        (*pStreamHandler)->m_ESDSInfoSize = 0;
-                    }
-                }
-                else if(M4DA_StreamTypeVideoMpeg4Avc == streamType) {
-                    if(meta->findData(kKeyAVCC, &type, &data, &size)) {
-                        decoderSpecificInfoSize = size;
-                        if (decoderSpecificInfoSize != 0) {
-                            DecoderSpecificInfo = (M4OSA_Int8*)M4OSA_32bitAlignedMalloc(
-                                decoderSpecificInfoSize, M4READER_3GP,
-                                (M4OSA_Char*)"H264 DecoderSpecific" );
-                            if (M4OSA_NULL == DecoderSpecificInfo) {
-                                ALOGV("VideoEditor3gp_getNextStream is NULL ");
-                                return M4ERR_ALLOC;
-                            }
-                            memcpy((void *)DecoderSpecificInfo,
-                                (void *)data, decoderSpecificInfoSize);
-                        } else {
-                            ALOGV("DSI Size %d", decoderSpecificInfoSize);
-                            DecoderSpecificInfo = M4OSA_NULL;
-                        }
-                    }
-                    (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
-                    (*pStreamHandler)->m_ESDSInfoSize = 0;
-
-                    err = VideoEditor3gpReader_AnalyseAvcDsi(*pStreamHandler,
-                    (M4OSA_Int32*)DecoderSpecificInfo, decoderSpecificInfoSize);
-
-                    if (M4NO_ERROR != err) {
-                        return err;
-                    }
-                    ALOGV("decsize %d, h264decsize %d: %d", (*pStreamHandler)->\
-                        m_decoderSpecificInfoSize, (*pStreamHandler)->\
-                        m_H264decoderSpecificInfoSize);
-
-                    if(M4OSA_NULL != DecoderSpecificInfo) {
-                        free(DecoderSpecificInfo);
-                        DecoderSpecificInfo = M4OSA_NULL;
-                    }
-                } else if( (M4DA_StreamTypeVideoMpeg4 == streamType) ) {
-                    if (meta->findData(kKeyESDS, &type, &data, &size)) {
-                        ESDS esds((const char *)data, size);
-                        CHECK_EQ(esds.InitCheck(), (status_t)OK);
-
-                        (*pStreamHandler)->m_ESDSInfoSize = size;
-                        (*pStreamHandler)->m_pESDSInfo = (M4OSA_UInt8*)\
-                        M4OSA_32bitAlignedMalloc((*pStreamHandler)->m_ESDSInfoSize,
-                        M4READER_3GP, (M4OSA_Char*)"M4V DecoderSpecific" );
-                        if (M4OSA_NULL == (*pStreamHandler)->m_pESDSInfo) {
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)(*pStreamHandler)->\
-                            m_pESDSInfo, (void *)data, size);
-
-                        esds.getCodecSpecificInfo(&codec_specific_data,
-                            &codec_specific_data_size);
-                        ALOGV("VE MP4 dsisize: %d, %x", codec_specific_data_size,
-                            codec_specific_data);
-
-                        (*pStreamHandler)->m_decoderSpecificInfoSize =
-                            codec_specific_data_size;
-                        if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
-                            DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                                (*pStreamHandler)->m_decoderSpecificInfoSize,
-                                M4READER_3GP, (M4OSA_Char*)" DecoderSpecific" );
-                            if (M4OSA_NULL == DecoderSpecific) {
-                                return M4ERR_ALLOC;
-                            }
-                            memcpy((void *)DecoderSpecific,
-                                (void *)codec_specific_data,
-                                codec_specific_data_size);
-                            (*pStreamHandler)->m_pDecoderSpecificInfo =
-                                DecoderSpecific;
-                        }
-                        else {
-                            (*pStreamHandler)->m_pDecoderSpecificInfo =
-                                M4OSA_NULL;
-                        }
-                        (*pStreamHandler)->m_pH264DecoderSpecificInfo =
-                            M4OSA_NULL;
-                        (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
-                    }
-                } else {
-                    ALOGV("VideoEditor3gpReader_getNextStream NO video stream");
-                    return M4ERR_READER_UNKNOWN_STREAM_TYPE;
-                }
-            }
-            else {
-                ALOGV("VideoEditor3gpReader_getNextStream NO video stream");
-                return M4ERR_READER_UNKNOWN_STREAM_TYPE;
-            }
-
-        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
-            ALOGV("VideoEditor3gpReader_getNextStream audio getTrack called");
-            pC->mAudioSource = pC->mExtractor->getTrack(pC->mCurrTrack);
-            pC->mAudioSource->start();
-            *pMediaFamily = M4READER_kMediaFamilyAudio;
-
-            if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
-                streamType = M4DA_StreamTypeAudioAmrNarrowBand;
-            } else if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
-                streamType = M4DA_StreamTypeAudioAmrWideBand;
-            }
-            else if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
-                streamType = M4DA_StreamTypeAudioAac;
-            } else {
-                ALOGV("VideoEditor3gpReader_getNextStrea streamtype Unknown ");
-            }
-            if(streamType != M4DA_StreamTypeUnknown) {
-                pC->mStreamType = streamType;
-                pC->mStreamId = pC->mCurrTrack;
-
-                ALOGV("VE streamtype %d ,id %d",  streamType, pC->mCurrTrack);
-
-                pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc
-                    (sizeof(M4_AudioStreamHandler), M4READER_3GP,
-                    (M4OSA_Char*)"M4_AudioStreamHandler");
-                if (M4OSA_NULL == pAudioStreamHandler) {
-                    return M4ERR_ALLOC;
-                }
-                pAudioStreamHandler->m_structSize=sizeof(M4_AudioStreamHandler);
-                pAudioStreamHandler->m_byteSampleSize   = 0;
-                pAudioStreamHandler->m_nbChannels       = 0;
-                pAudioStreamHandler->m_samplingFrequency= 0;
-                pAudioStreamHandler->m_byteFrameLength  = 0;
-
-                (*pStreamHandler) = (M4_StreamHandler*)(pAudioStreamHandler);
-                pC->mAudioStreamHandler =
-                    (M4_StreamHandler*)(pAudioStreamHandler);
-                (*pStreamHandler)->m_averageBitRate = 0;
-                haveAudio = true;
-                pC->mAudioStreamHandler=(M4_StreamHandler*)pAudioStreamHandler;
-                pC->mAudioStreamHandler->m_pESDSInfo = M4OSA_NULL;
-                pC->mAudioStreamHandler->m_ESDSInfoSize = 0;
-
-                meta->findInt32(kKeyMaxInputSize, (int32_t*)&(maxAUSize));
-                if(maxAUSize == 0) {
-                    maxAUSize = 70000;
-                }
-                (*pStreamHandler)->m_maxAUSize = maxAUSize;
-                ALOGV("VE Audio mMaxAUSize from MP4 extractor: %d", maxAUSize);
-            }
-            if((M4DA_StreamTypeAudioAmrNarrowBand == streamType) ||
-                (M4DA_StreamTypeAudioAmrWideBand == streamType)) {
-                M4OSA_UInt32 freqIndex = 0; /**< AMR NB */
-                M4OSA_UInt32 modeSet;
-                M4OSA_UInt32 i;
-                M4OSA_Context pBitParserContext = M4OSA_NULL;
-
-                if(M4DA_StreamTypeAudioAmrWideBand == streamType) {
-                    freqIndex = 1; /**< AMR WB */
-                }
-
-                if (meta->findData(kKeyESDS, &type, &data, &size)) {
-                    ESDS esds((const char *)data, size);
-                    CHECK_EQ(esds.InitCheck(), (status_t)OK);
-
-                    esds.getCodecSpecificInfo(&codec_specific_data,
-                        &codec_specific_data_size);
-                    (*pStreamHandler)->m_decoderSpecificInfoSize =
-                        codec_specific_data_size;
-
-                    if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
-                        DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                            (*pStreamHandler)->m_decoderSpecificInfoSize,
-                            M4READER_3GP, (M4OSA_Char*)"AMR DecoderSpecific" );
-                        if (M4OSA_NULL == DecoderSpecific) {
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)DecoderSpecific,
-                            (void *)codec_specific_data,
-                            codec_specific_data_size);
-                        (*pStreamHandler)->m_pDecoderSpecificInfo =
-                            DecoderSpecific;
-                    } else {
-                        (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
-                    }
-                } else {
-                    M4OSA_UChar AmrDsi[] =
-                        {'P','H','L','P',0x00, 0x00, 0x80, 0x00, 0x01,};
-                    (*pStreamHandler)->m_decoderSpecificInfoSize = 9;
-                    DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                        (*pStreamHandler)->m_decoderSpecificInfoSize,
-                        M4READER_3GP, (M4OSA_Char*)"PHLP DecoderSpecific" );
-                    if (M4OSA_NULL == DecoderSpecific) {
-                        return M4ERR_ALLOC;
-                    }
-                    if(freqIndex ==0) {
-                        AmrDsi[8] = 0x01;
-                    } else {
-                        AmrDsi[8] = 0x02;
-                    }
-                    for(i = 0; i< 9; i++) {
-                        DecoderSpecific[i] = AmrDsi[i];
-                    }
-                    (*pStreamHandler)->m_pDecoderSpecificInfo = DecoderSpecific;
-                }
-                (*pStreamHandler)->m_averageBitRate =
-                    VideoEditor3gpReader_AmrBitRate[freqIndex][7];
-            } else if((M4DA_StreamTypeAudioAac == streamType)) {
-                if (meta->findData(kKeyESDS, &type, &data, &size)) {
-                    ESDS esds((const char *)data, size);
-                    CHECK_EQ(esds.InitCheck(), (status_t)OK);
-
-                    (*pStreamHandler)->m_ESDSInfoSize = size;
-                    (*pStreamHandler)->m_pESDSInfo = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                        (*pStreamHandler)->m_ESDSInfoSize, M4READER_3GP,
-                        (M4OSA_Char*)"AAC DecoderSpecific" );
-                    if (M4OSA_NULL == (*pStreamHandler)->m_pESDSInfo) {
-                        return M4ERR_ALLOC;
-                    }
-                    memcpy((void *)(*pStreamHandler)->m_pESDSInfo,
-                    (void *)data, size);
-                    esds.getCodecSpecificInfo(&codec_specific_data,
-                        &codec_specific_data_size);
-
-                    ALOGV("VEdsi %d,%x",codec_specific_data_size,
-                        codec_specific_data);
-
-                    (*pStreamHandler)->m_decoderSpecificInfoSize =
-                        codec_specific_data_size;
-                    if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
-                        DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
-                            (*pStreamHandler)->m_decoderSpecificInfoSize,
-                            M4READER_3GP, (M4OSA_Char*)"AAC DecoderSpecific" );
-                        if (M4OSA_NULL == DecoderSpecific) {
-                            return M4ERR_ALLOC;
-                        }
-                        memcpy((void *)DecoderSpecific,
-                            (void *)codec_specific_data,
-                            codec_specific_data_size);
-                        (*pStreamHandler)->m_pDecoderSpecificInfo =
-                            DecoderSpecific;
-                    } else {
-                        (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
-                    }
-                }
-            } else {
-                ALOGV("VideoEditor3gpReader_getNextStream mStreamType: none ");
-                return M4ERR_READER_UNKNOWN_STREAM_TYPE;
-            }
-        } else {
-            ALOGV("VE noaudio-video stream:pC->mCurrTrack = %d ",pC->mCurrTrack);
-            pC->mCurrTrack++; //Increment current track to get the next track
-            return M4ERR_READER_UNKNOWN_STREAM_TYPE;
-        }
-        ALOGV("VE StreamType: %d, stremhandler %x",streamType, *pStreamHandler );
-        (*pStreamHandler)->m_streamType = streamType;
-        (*pStreamHandler)->m_streamId   = pC->mStreamId;
-        (*pStreamHandler)->m_pUserData  = M4OSA_NULL;
-        (*pStreamHandler)->m_structSize = sizeof(M4_StreamHandler);
-        (*pStreamHandler)->m_bStreamIsOK = M4OSA_TRUE;
-
-        meta->findInt64(kKeyDuration,
-            (int64_t*)&(Duration));
-
-        (*pStreamHandler)->m_duration = (int32_t)(Duration / 1000);
-
-        pC->mMaxDuration = ((*pStreamHandler)->m_duration);
-        ALOGV("VE str duration duration: %d ", (*pStreamHandler)->m_duration);
-
-        /* In AAC case: Put the first AU in pAudioStreamHandler->m_pUserData
-         *since decoder has to know if stream contains SBR data(Implicit sig) */
-        if(M4DA_StreamTypeAudioAac == (*pStreamHandler)->m_streamType) {
-            M4READER_AudioSbrUserdata*  pAudioSbrUserdata;
-
-            pAudioSbrUserdata = (M4READER_AudioSbrUserdata*)M4OSA_32bitAlignedMalloc(
-                sizeof(M4READER_AudioSbrUserdata),M4READER_3GP,
-                (M4OSA_Char*)"M4READER_AudioSbrUserdata");
-            if (M4OSA_NULL == pAudioSbrUserdata) {
-                err = M4ERR_ALLOC;
-                goto Error;
-            }
-            (*pStreamHandler)->m_pUserData = pAudioSbrUserdata;
-            pAudioSbrUserdata->m_bIsSbrEnabled = M4OSA_FALSE;
-
-            pAudioSbrUserdata->m_pFirstAU = (M4_AccessUnit*)M4OSA_32bitAlignedMalloc(
-                sizeof(M4_AccessUnit),M4READER_3GP, (M4OSA_Char*)"1st AAC AU");
-            if (M4OSA_NULL == pAudioSbrUserdata->m_pFirstAU) {
-                pAudioSbrUserdata->m_pAacDecoderUserConfig = M4OSA_NULL;
-                err = M4ERR_ALLOC;
-                goto Error;
-            }
-            pAudioSbrUserdata->m_pAacDecoderUserConfig = (M4_AacDecoderConfig*)\
-                M4OSA_32bitAlignedMalloc(sizeof(M4_AacDecoderConfig),M4READER_3GP,
-                (M4OSA_Char*)"m_pAacDecoderUserConfig");
-            if (M4OSA_NULL == pAudioSbrUserdata->m_pAacDecoderUserConfig) {
-                err = M4ERR_ALLOC;
-                goto Error;
-            }
-        }
-        if(M4DA_StreamTypeAudioAac == (*pStreamHandler)->m_streamType) {
-            M4_AudioStreamHandler* pAudioStreamHandler =
-                (M4_AudioStreamHandler*)(*pStreamHandler);
-            M4READER_AudioSbrUserdata* pUserData = (M4READER_AudioSbrUserdata*)\
-                (pAudioStreamHandler->m_basicProperties.m_pUserData);
-
-            err = VideoEditor3gpReader_fillAuStruct(pC, (*pStreamHandler),
-                (M4_AccessUnit*)pUserData->m_pFirstAU);
-            if (M4NO_ERROR != err) {
-                goto Error;
-            }
-            err = VideoEditor3gpReader_getNextAu(pC, (*pStreamHandler),
-                (M4_AccessUnit*)pUserData->m_pFirstAU);
-
-            /*
-             * 1. "M4WAR_NO_MORE_AU == err" indicates that there is no more
-             * access unit from the current track. In other words, there
-             * is only a single access unit from the current track, and
-             * the parsing of this track has reached EOS. The reason why
-             * the first access unit needs to be parsed here is because for
-             * some audio codec (like AAC), the very first access unit
-             * must be decoded before its configuration/encoding parameters
-             * (such as # of channels and sample rate) can be correctly
-             * determined.
-             *
-             * 2. "trackCount > pC->mCurrTrack" indicates that there are other
-             * tracks to be parsed, in addition to the current track.
-             *
-             * When both conditions 1 & 2 hold, other tracks should be
-             * parsed. Thus, we should not bail out.
-             */
-            if (M4WAR_NO_MORE_AU == err && trackCount > pC->mCurrTrack) {
-                err = M4NO_ERROR;
-            }
-
-            if (M4NO_ERROR != err) {
-                goto Error;
-            }
-            err = VideoEditor3gpReader_reset(pC, (*pStreamHandler));
-            if (M4NO_ERROR != err) {
-                goto Error;
-            }
-        }
-    }
-    pC->mCurrTrack++; //Increment the current track to get next track
-    ALOGV("pC->mCurrTrack = %d",pC->mCurrTrack);
-
-    if (!haveAudio && !haveVideo) {
-        *pMediaFamily=M4READER_kMediaFamilyUnknown;
-        return M4ERR_READER_UNKNOWN_STREAM_TYPE;
-    }
-Error:
-    ALOGV("VideoEditor3gpReader_getNextStreamHandler end error = %d",err);
-    return err;
-}
-
-M4OSA_ERR VideoEditor3gpReader_getPrevRapTime(M4OSA_Context context,
-    M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime)
-{
-    VideoEditor3gpReader_Context *pC = (VideoEditor3gpReader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    MediaBuffer *mMediaBuffer = M4OSA_NULL;
-    MediaSource::ReadOptions options;
-    M4OSA_Time time64;
-    int64_t tempTime64 = 0;
-    status_t error;
-
-    ALOGV("VideoEditor3gpReader_getPrevRapTime begin");
-
-    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getPrevRapTime: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getPrevRapTime invalid pointer to StreamHandler");
-    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
-        "VideoEditor3gpReader_getPrevRapTime: invalid time pointer");
-    if (*pTime == (pStreamHandler->m_duration)) {
-        *pTime -= 1;
-    }
-
-    time64 = (M4OSA_Time)*pTime * 1000;
-
-    ALOGV("VideoEditor3gpReader_getPrevRapTime seek time: %ld",time64);
-    options.setSeekTo(time64, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    error = pC->mVideoSource->read(&mMediaBuffer, &options);
-    if (error != OK) {
-        //Can not get the previous Sync.
-        //Must be end of stream.
-        return M4WAR_NO_MORE_AU;
-    }
-
-    mMediaBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&tempTime64);
-    ALOGV("VideoEditor3gpReader_getPrevRapTime read time %ld, %x", tempTime64,
-        mMediaBuffer);
-
-    *pTime = (M4OSA_Int32)(tempTime64 / 1000);
-
-    if(mMediaBuffer != M4OSA_NULL) {
-        ALOGV(" mMediaBuffer size = %d length %d", mMediaBuffer->size(),
-            mMediaBuffer->range_length());
-        mMediaBuffer->release();
-        mMediaBuffer = M4OSA_NULL;
-    }
-    options.clearSeekTo();
-
-    if(error != OK) {
-        ALOGV("VideoEditor3gpReader_getPrevRapTime end \
-            M4WAR_READER_INFORMATION_NOT_PRESENT");
-        return M4WAR_READER_INFORMATION_NOT_PRESENT;
-    } else {
-        ALOGV("VideoEditor3gpReader_getPrevRapTime end: err %x", err);
-        err = M4NO_ERROR;
-        return err;
-    }
-}
-
-extern "C" {
-M4OSA_ERR VideoEditor3gpReader_getInterface(M4READER_MediaType *pMediaType,
-        M4READER_GlobalInterface **pRdrGlobalInterface,
-        M4READER_DataInterface **pRdrDataInterface) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pMediaType,      M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrGlobalInterface, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrDataInterface, M4ERR_PARAMETER);
-
-    ALOGV("VideoEditor3gpReader_getInterface begin");
-    ALOGV("VideoEditor3gpReader_getInterface %d 0x%x 0x%x", *pMediaType,
-        *pRdrGlobalInterface,*pRdrDataInterface);
-
-    SAFE_MALLOC(*pRdrGlobalInterface, M4READER_GlobalInterface, 1,
-        "VideoEditor3gpReader_getInterface");
-    SAFE_MALLOC(*pRdrDataInterface, M4READER_DataInterface, 1,
-        "VideoEditor3gpReader_getInterface");
-
-    *pMediaType = M4READER_kMediaType3GPP;
-
-    (*pRdrGlobalInterface)->m_pFctCreate       = VideoEditor3gpReader_create;
-    (*pRdrGlobalInterface)->m_pFctDestroy      = VideoEditor3gpReader_destroy;
-    (*pRdrGlobalInterface)->m_pFctOpen         = VideoEditor3gpReader_open;
-    (*pRdrGlobalInterface)->m_pFctClose        = VideoEditor3gpReader_close;
-    (*pRdrGlobalInterface)->m_pFctGetOption    = VideoEditor3gpReader_getOption;
-    (*pRdrGlobalInterface)->m_pFctSetOption    = VideoEditor3gpReader_setOption;
-    (*pRdrGlobalInterface)->m_pFctGetNextStream =
-        VideoEditor3gpReader_getNextStreamHandler;
-    (*pRdrGlobalInterface)->m_pFctFillAuStruct =
-        VideoEditor3gpReader_fillAuStruct;
-    (*pRdrGlobalInterface)->m_pFctStart        = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctStop         = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctJump         = VideoEditor3gpReader_jump;
-    (*pRdrGlobalInterface)->m_pFctReset        = VideoEditor3gpReader_reset;
-    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime =
-        VideoEditor3gpReader_getPrevRapTime;
-    (*pRdrDataInterface)->m_pFctGetNextAu      = VideoEditor3gpReader_getNextAu;
-    (*pRdrDataInterface)->m_readerContext      = M4OSA_NULL;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditor3gpReader_getInterface no error");
-    } else {
-        SAFE_FREE(*pRdrGlobalInterface);
-        SAFE_FREE(*pRdrDataInterface);
-
-        ALOGV("VideoEditor3gpReader_getInterface ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditor3gpReader_getInterface end");
-    return err;
-}
-
-}  /* extern "C" */
-
-}  /* namespace android */
-
-
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp
deleted file mode 100755
index e4c7ea1..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp
+++ /dev/null
@@ -1,991 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorAudioDecoder.cpp
-* @brief  StageFright shell Audio Decoder
-*************************************************************************
-*/
-
-#define LOG_NDEBUG 1
-#define LOG_TAG "VIDEOEDITOR_AUDIODECODER"
-
-#include "M4OSA_Debug.h"
-#include "VideoEditorAudioDecoder.h"
-#include "VideoEditorUtils.h"
-#include "M4MCS_InternalTypes.h"
-
-#include "utils/Log.h"
-#include "utils/Vector.h"
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
-
-/********************
- *   DEFINITIONS    *
- ********************/
-// Version
-#define VIDEOEDITOR_AUDIO_DECODER_VERSION_MAJOR 1
-#define VIDEOEDITOR_AUDIO_DECODER_VERSION_MINOR 0
-#define VIDEOEDITOR_AUDIO_DECODER_VERSION_REV   0
-
-// Force using software decoder as engine does not support prefetch
-#define VIDEOEDITOR_FORCECODEC kSoftwareCodecsOnly
-
-namespace android {
-
-struct VideoEditorAudioDecoderSource : public MediaSource {
-    public:
-        static sp<VideoEditorAudioDecoderSource> Create(
-                const sp<MetaData>& format, void *decoderShellContext);
-        virtual status_t start(MetaData *params = NULL);
-        virtual status_t stop();
-        virtual sp<MetaData> getFormat();
-        virtual status_t read(MediaBuffer **buffer,
-        const ReadOptions *options = NULL);
-        virtual void storeBuffer(MediaBuffer *buffer);
-
-    protected:
-        virtual ~VideoEditorAudioDecoderSource();
-
-    private:
-        enum State {
-            CREATED,
-            STARTED,
-            ERROR
-        };
-        VideoEditorAudioDecoderSource(const sp<MetaData>& format,
-         void *decoderShellContext);
-        sp<MetaData> mFormat;
-        Vector<MediaBuffer*> mBuffers;
-        Mutex mLock;  // protects mBuffers
-        bool mIsEOS;
-        State mState;
-        void* mDecShellContext;
-        // Don't call me.
-        VideoEditorAudioDecoderSource(const VideoEditorAudioDecoderSource&);
-        VideoEditorAudioDecoderSource& operator=(
-            const VideoEditorAudioDecoderSource &);
-};
-
-/**
- ******************************************************************************
- * structure VideoEditorAudioDecoder_Context
- * @brief    This structure defines the context of the StageFright audio decoder
- *           shell
- ******************************************************************************
-*/
-
-typedef struct {
-    M4AD_Type                          mDecoderType;
-    M4_AudioStreamHandler*             mAudioStreamHandler;
-    sp<VideoEditorAudioDecoderSource>  mDecoderSource;
-    OMXClient                          mClient;
-    sp<MediaSource>                    mDecoder;
-    int32_t                            mNbOutputChannels;
-    uint32_t                           mNbInputFrames;
-    uint32_t                           mNbOutputFrames;
-    M4READER_DataInterface  *m_pReader;
-    M4_AccessUnit* m_pNextAccessUnitToDecode;
-    M4OSA_ERR readerErrCode;
-    int32_t timeStampMs;
-
-} VideoEditorAudioDecoder_Context;
-
-sp<VideoEditorAudioDecoderSource> VideoEditorAudioDecoderSource::Create(
-        const sp<MetaData>& format, void *decoderShellContext) {
-
-    sp<VideoEditorAudioDecoderSource> aSource =
-        new VideoEditorAudioDecoderSource(format, decoderShellContext);
-
-    return aSource;
-}
-
-VideoEditorAudioDecoderSource::VideoEditorAudioDecoderSource(
-        const sp<MetaData>& format, void* decoderShellContext):
-        mFormat(format),
-        mIsEOS(false),
-        mState(CREATED),
-        mDecShellContext(decoderShellContext) {
-}
-
-VideoEditorAudioDecoderSource::~VideoEditorAudioDecoderSource() {
-
-    if( STARTED == mState ) {
-        stop();
-    }
-}
-
-status_t VideoEditorAudioDecoderSource::start(MetaData *meta) {
-    status_t err = OK;
-
-    if( CREATED != mState ) {
-        ALOGV("VideoEditorAudioDecoderSource::start: invalid state %d", mState);
-        return UNKNOWN_ERROR;
-    }
-
-    mState = STARTED;
-
-cleanUp:
-    ALOGV("VideoEditorAudioDecoderSource::start END (0x%x)", err);
-    return err;
-}
-
-status_t VideoEditorAudioDecoderSource::stop() {
-    Mutex::Autolock autolock(mLock);
-    status_t err = OK;
-
-    ALOGV("VideoEditorAudioDecoderSource::stop begin");
-
-    if( STARTED != mState ) {
-        ALOGV("VideoEditorAudioDecoderSource::stop: invalid state %d", mState);
-        return UNKNOWN_ERROR;
-    }
-
-    if (!mBuffers.empty()) {
-        int n = mBuffers.size();
-        for (int i = 0; i < n; i++) {
-            mBuffers.itemAt(i)->release();
-        }
-        ALOGW("VideoEditorAudioDecoderSource::stop : %d buffer remained", n);
-        mBuffers.clear();
-    }
-
-    mState = CREATED;
-
-    ALOGV("VideoEditorAudioDecoderSource::stop END (0x%x)", err);
-    return err;
-}
-
-sp<MetaData> VideoEditorAudioDecoderSource::getFormat() {
-
-    ALOGV("VideoEditorAudioDecoderSource::getFormat");
-    return mFormat;
-}
-
-static MediaBuffer* readBufferFromReader(
-        VideoEditorAudioDecoder_Context* pDecContext) {
-    M4OSA_ERR lerr = M4NO_ERROR;
-    M4_AccessUnit* pAccessUnit = pDecContext->m_pNextAccessUnitToDecode;
-
-    // Get next AU from reader.
-    lerr = pDecContext->m_pReader->m_pFctGetNextAu(
-               pDecContext->m_pReader->m_readerContext,
-               (M4_StreamHandler*)pDecContext->mAudioStreamHandler,
-               pAccessUnit);
-
-    if (lerr == M4WAR_NO_MORE_AU) {
-        ALOGV("readBufferFromReader : EOS");
-        return NULL;
-    }
-
-    pDecContext->timeStampMs = pAccessUnit->m_CTS;
-
-    MediaBuffer* newBuffer = new MediaBuffer((size_t)pAccessUnit->m_size);
-    memcpy((void *)((M4OSA_Int8*)newBuffer->data() + newBuffer->range_offset()),
-        (void *)pAccessUnit->m_dataAddress, pAccessUnit->m_size);
-    newBuffer->meta_data()->setInt64(kKeyTime, (pAccessUnit->m_CTS * 1000LL));
-    return newBuffer;
-}
-
-status_t VideoEditorAudioDecoderSource::read(MediaBuffer **buffer,
-        const ReadOptions *options) {
-    Mutex::Autolock autolock(mLock);
-    MediaSource::ReadOptions readOptions;
-
-    VideoEditorAudioDecoder_Context* pDecContext =
-     (VideoEditorAudioDecoder_Context *)mDecShellContext;
-
-    if ( STARTED != mState ) {
-        ALOGV("VideoEditorAudioDecoderSource::read invalid state %d", mState);
-        return UNKNOWN_ERROR;
-    }
-
-    // Get a buffer from the reader if we don't have any
-    if(mBuffers.empty()) {
-        MediaBuffer* newBuffer = readBufferFromReader(pDecContext);
-        if (!newBuffer) {
-            *buffer = NULL;
-            pDecContext->readerErrCode = M4WAR_NO_MORE_AU;
-            return ERROR_END_OF_STREAM;
-        }
-        mBuffers.push(newBuffer);
-    }
-    *buffer = mBuffers.itemAt(0);
-    mBuffers.removeAt(0);
-
-    return OK;
-}
-
-void VideoEditorAudioDecoderSource::storeBuffer(MediaBuffer *buffer) {
-    Mutex::Autolock autolock(mLock);
-    VideoEditorAudioDecoder_Context* pDecContext =
-     (VideoEditorAudioDecoder_Context *)mDecShellContext;
-
-    ALOGV("VideoEditorAudioDecoderSource::storeBuffer begin");
-
-    // If the user didn't give us a buffer, get it from the reader.
-    if(buffer == NULL) {
-        MediaBuffer* newBuffer = readBufferFromReader(pDecContext);
-        if (!newBuffer) {
-            pDecContext->readerErrCode = M4WAR_NO_MORE_AU;
-            return;
-        }
-        buffer = newBuffer;
-    }
-
-    mBuffers.push(buffer);
-    ALOGV("VideoEditorAudioDecoderSource::storeBuffer END");
-}
-
-/********************
- *      TOOLS       *
- ********************/
-
-M4OSA_ERR VideoEditorAudioDecoder_getBits(M4OSA_Int8* pData,
-        M4OSA_UInt32 dataSize, M4OSA_UInt8 nbBits, M4OSA_Int32* pResult,
-        M4OSA_UInt32* pOffset) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 startByte = 0;
-    M4OSA_UInt32 startBit = 0;
-    M4OSA_UInt32 endByte = 0;
-    M4OSA_UInt32 endBit = 0;
-    M4OSA_UInt32 currentByte = 0;
-    M4OSA_UInt32 result = 0;
-    M4OSA_UInt32 ui32Tmp = 0;
-    M4OSA_UInt32 ui32Mask = 0;
-
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pData, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pOffset, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(32 >= nbBits, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK((*pOffset + nbBits) <= 8*dataSize, M4ERR_PARAMETER);
-
-    ALOGV("VideoEditorAudioDecoder_getBits begin");
-
-    startByte   = (*pOffset) >> 3;
-    endByte     = (*pOffset + nbBits) >> 3;
-    startBit    = (*pOffset) % 8;
-    endBit      = (*pOffset + nbBits) % 8;
-    currentByte = startByte;
-
-    // Extract the requested nunber of bits from memory
-    while( currentByte <= endByte) {
-        ui32Mask = 0x000000FF;
-        if( currentByte == startByte ) {
-            ui32Mask >>= startBit;
-        }
-        ui32Tmp = ui32Mask & ((M4OSA_UInt32)pData[currentByte]);
-        if( currentByte == endByte ) {
-            ui32Tmp >>= (8-endBit);
-            result <<= endBit;
-        } else {
-            result <<= 8;
-        }
-        result |= ui32Tmp;
-        currentByte++;
-    }
-
-    *pResult = result;
-    *pOffset += nbBits;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_getBits no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_getBits ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_getBits end");
-    return err;
-}
-
-
-#define FREQ_TABLE_SIZE 16
-const M4OSA_UInt32 AD_AAC_FREQ_TABLE[FREQ_TABLE_SIZE] =
-    {96000, 88200, 64000, 48000, 44100,
-    32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0};
-
-
-M4OSA_ERR VideoEditorAudioDecoder_parse_AAC_DSI(M4OSA_Int8* pDSI,
-        M4OSA_UInt32 dsiSize, AAC_DEC_STREAM_PROPS* pProperties) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_UInt32 offset = 0;
-    M4OSA_Int32 result = 0;
-
-    ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI begin");
-
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pDSI, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pProperties, M4ERR_PARAMETER);
-
-    // Get the object type
-    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 5, &result, &offset);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    switch( result ) {
-        case 2:
-            /* Audio Object Type is 2 (AAC Low Complexity) */
-            pProperties->aPSPresent  = 0;
-            pProperties->aSBRPresent = 0;
-            break;
-        case 5:
-            /* Audio Object Type is 5 (Spectral Band Replication) */
-            pProperties->aPSPresent  = 0;
-            pProperties->aSBRPresent = 1;
-            break;
-        case 29:
-            /* Audio Object Type is 29 (Parametric Stereo) */
-            pProperties->aPSPresent  = 1;
-            pProperties->aSBRPresent = 1;
-            break;
-        default:
-            ALOGV("parse_AAC_DSI ERROR : object type %d is not supported",
-                result);
-            VIDEOEDITOR_CHECK(!"invalid AAC object type", M4ERR_BAD_OPTION_ID);
-            break;
-    }
-    pProperties->aAudioObjectType = (M4OSA_Int32)result;
-
-    // Get the frequency index
-    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 4, &result, &offset);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    VIDEOEDITOR_CHECK((0 <= result) && (FREQ_TABLE_SIZE > result),
-        M4ERR_PARAMETER);
-    pProperties->aSampFreq = AD_AAC_FREQ_TABLE[result];
-    pProperties->aExtensionSampFreq = 0;
-
-    // Get the number of channels
-    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 4, &result, &offset);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    pProperties->aNumChan = (M4OSA_UInt32)result;
-
-    // Set the max PCM samples per channel
-    pProperties->aMaxPCMSamplesPerCh = (pProperties->aSBRPresent) ? 2048 : 1024;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI end");
-    return err;
-}
-
-/********************
- * ENGINE INTERFACE *
- ********************/
-
-M4OSA_ERR VideoEditorAudioDecoder_destroy(M4AD_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorAudioDecoder_destroy begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
-
-    // Stop the graph
-    if( M4OSA_NULL != pDecoderContext->mDecoder.get() ) {
-        pDecoderContext->mDecoder->stop();
-    }
-
-    // Destroy the graph
-    pDecoderContext->mDecoderSource.clear();
-    pDecoderContext->mDecoder.clear();
-    pDecoderContext->mClient.disconnect();
-
-    SAFE_FREE(pDecoderContext);
-    pContext = M4OSA_NULL;
-    ALOGV("VideoEditorAudioDecoder_destroy : DONE");
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_destroy no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_destroy ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_destroy : end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_create(M4AD_Type decoderType,
-        M4AD_Context* pContext, M4_AudioStreamHandler* pStreamHandler,
-        void* pUserData) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-    AAC_DEC_STREAM_PROPS aacProperties;
-    status_t result = OK;
-    sp<MetaData> decoderMetaData = NULL;
-    const char* mime = NULL;
-    uint32_t codecFlags = 0;
-
-    ALOGV("VideoEditorAudioDecoder_create begin: decoderType %d", decoderType);
-
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,       M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler, M4ERR_PARAMETER);
-
-    // Context allocation & initialization
-    SAFE_MALLOC(pDecoderContext, VideoEditorAudioDecoder_Context, 1,
-        "AudioDecoder");
-    pDecoderContext->mDecoderType = decoderType;
-    pDecoderContext->mAudioStreamHandler = pStreamHandler;
-
-    pDecoderContext->mNbInputFrames  = 0;
-    pDecoderContext->mNbOutputFrames = 0;
-    pDecoderContext->readerErrCode = M4NO_ERROR;
-    pDecoderContext->timeStampMs = -1;
-
-    ALOGV("VideoEditorAudioDecoder_create : maxAUSize %d",
-        pDecoderContext->mAudioStreamHandler->m_basicProperties.m_maxAUSize);
-
-    // Create the meta data for the decoder
-    decoderMetaData = new MetaData;
-    switch( pDecoderContext->mDecoderType ) {
-        case M4AD_kTypeAMRNB:
-            // StageFright parameters
-            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
-            // Engine parameters
-            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 160;
-            // Number of bytes per sample
-            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 2;
-            pDecoderContext->mAudioStreamHandler->m_samplingFrequency = 8000;
-            pDecoderContext->mAudioStreamHandler->m_nbChannels = 1;
-            break;
-
-        case M4AD_kTypeAMRWB:
-            // StageFright parameters
-            mime = MEDIA_MIMETYPE_AUDIO_AMR_WB;
-
-            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 160;
-            // Number of bytes per sample
-            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 2;
-            pDecoderContext->mAudioStreamHandler->m_samplingFrequency = 16000;
-            pDecoderContext->mAudioStreamHandler->m_nbChannels = 1;
-            break;
-
-        case M4AD_kTypeAAC:
-            // Reject ADTS & ADIF (or any incorrect type)
-            VIDEOEDITOR_CHECK(M4DA_StreamTypeAudioAac ==
-                pDecoderContext->mAudioStreamHandler->\
-                m_basicProperties.m_streamType,M4ERR_PARAMETER);
-
-            // StageFright parameters
-            mime = MEDIA_MIMETYPE_AUDIO_AAC;
-
-            decoderMetaData->setData(kKeyESDS, kTypeESDS,
-                pStreamHandler->m_basicProperties.m_pESDSInfo,
-                pStreamHandler->m_basicProperties.m_ESDSInfoSize);
-
-            // Engine parameters
-            // Retrieve sampling frequency and number of channels from the DSI
-            err = VideoEditorAudioDecoder_parse_AAC_DSI(
-                (M4OSA_Int8*)pStreamHandler->m_basicProperties.\
-                    m_pDecoderSpecificInfo,
-                pStreamHandler->m_basicProperties.m_decoderSpecificInfoSize,
-                &aacProperties);
-
-            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 1024;
-            // Number of bytes per sample
-            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 2;
-            pDecoderContext->mAudioStreamHandler->m_samplingFrequency =
-                aacProperties.aSampFreq;
-            pDecoderContext->mAudioStreamHandler->m_nbChannels =
-                aacProperties.aNumChan;
-
-            // Copy the stream properties into userdata
-            if( M4OSA_NULL != pUserData ) {
-                memcpy((void *)pUserData,
-                    (void *)&aacProperties,
-                    sizeof(AAC_DEC_STREAM_PROPS));
-            }
-            break;
-
-        case M4AD_kTypeMP3:
-            // StageFright parameters
-            mime = MEDIA_MIMETYPE_AUDIO_MPEG;
-            break;
-
-        default:
-            VIDEOEDITOR_CHECK(!"AudioDecoder_open : incorrect input format",
-                M4ERR_STATE);
-            break;
-    }
-    decoderMetaData->setCString(kKeyMIMEType, mime);
-    decoderMetaData->setInt32(kKeySampleRate,
-        (int32_t)pDecoderContext->mAudioStreamHandler->m_samplingFrequency);
-    decoderMetaData->setInt32(kKeyChannelCount,
-        pDecoderContext->mAudioStreamHandler->m_nbChannels);
-    decoderMetaData->setInt64(kKeyDuration,
-        (int64_t)pDecoderContext->mAudioStreamHandler->\
-        m_basicProperties.m_duration);
-
-    // Create the decoder source
-    pDecoderContext->mDecoderSource = VideoEditorAudioDecoderSource::Create(
-        decoderMetaData, (void *)pDecoderContext);
-    VIDEOEDITOR_CHECK(NULL != pDecoderContext->mDecoderSource.get(),
-        M4ERR_STATE);
-
-    // Connect to the OMX client
-    result = pDecoderContext->mClient.connect();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    // Create the OMX codec
-#ifdef VIDEOEDITOR_FORCECODEC
-    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
-#endif /* VIDEOEDITOR_FORCECODEC */
-
-    pDecoderContext->mDecoder = OMXCodec::Create(pDecoderContext->\
-        mClient.interface(),
-        decoderMetaData, false, pDecoderContext->mDecoderSource, NULL,
-            codecFlags);
-    VIDEOEDITOR_CHECK(NULL != pDecoderContext->mDecoder.get(), M4ERR_STATE);
-
-    // Get the output channels, the decoder might overwrite the input metadata
-    pDecoderContext->mDecoder->getFormat()->findInt32(kKeyChannelCount,
-        &pDecoderContext->mNbOutputChannels);
-    ALOGV("VideoEditorAudioDecoder_create : output chan %d",
-        pDecoderContext->mNbOutputChannels);
-
-    // Start the decoder
-    result = pDecoderContext->mDecoder->start();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    *pContext = pDecoderContext;
-    ALOGV("VideoEditorAudioDecoder_create : DONE");
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_create no error");
-    } else {
-        VideoEditorAudioDecoder_destroy(pDecoderContext);
-        *pContext = M4OSA_NULL;
-        ALOGV("VideoEditorAudioDecoder_create ERROR 0x%X", err);
-    }
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_create_AAC(M4AD_Context* pContext,
-        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
-
-    return VideoEditorAudioDecoder_create(
-        M4AD_kTypeAAC, pContext, pStreamHandler,pUserData);
-}
-
-
-M4OSA_ERR VideoEditorAudioDecoder_create_AMRNB(M4AD_Context* pContext,
-        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
-
-    return VideoEditorAudioDecoder_create(
-        M4AD_kTypeAMRNB, pContext, pStreamHandler, pUserData);
-}
-
-
-M4OSA_ERR VideoEditorAudioDecoder_create_AMRWB(M4AD_Context* pContext,
-        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
-
-    return VideoEditorAudioDecoder_create(
-        M4AD_kTypeAMRWB, pContext, pStreamHandler, pUserData);
-}
-
-
-M4OSA_ERR VideoEditorAudioDecoder_create_MP3(M4AD_Context* pContext,
-        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
-
-    return VideoEditorAudioDecoder_create(
-        M4AD_kTypeMP3, pContext, pStreamHandler, pUserData);
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_processInputBuffer(
-        M4AD_Context pContext, M4AD_Buffer* pInputBuffer) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-    MediaBuffer* buffer = NULL;
-
-    ALOGV("VideoEditorAudioDecoder_processInputBuffer begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-
-    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
-
-    if( M4OSA_NULL != pInputBuffer ) {
-        buffer = new MediaBuffer((size_t)pInputBuffer->m_bufferSize);
-        memcpy((void *)((M4OSA_Int8*)buffer->data() + buffer->range_offset()),
-            (void *)pInputBuffer->m_dataAddress, pInputBuffer->m_bufferSize);
-        buffer->meta_data()->setInt64(kKeyTime, pInputBuffer->m_timeStampUs);
-    }
-    pDecoderContext->mDecoderSource->storeBuffer(buffer);
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_processInputBuffer no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_processInputBuffer ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_processInputBuffer end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_processOutputBuffer(M4AD_Context pContext,
-        MediaBuffer* buffer, M4AD_Buffer* pOuputBuffer) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-    int32_t i32Tmp = 0;
-    int64_t i64Tmp = 0;
-    status_t result = OK;
-
-    ALOGV("VideoEditorAudioDecoder_processOutputBuffer begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pOuputBuffer, M4ERR_PARAMETER);
-
-    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
-
-    // Process the returned data
-    if( 0 == buffer->range_length() ) {
-        // Decoder has no data yet, nothing unusual
-        goto cleanUp;
-    }
-
-    pDecoderContext->mNbOutputFrames++;
-
-    if( pDecoderContext->mAudioStreamHandler->m_nbChannels ==
-        (M4OSA_UInt32)pDecoderContext->mNbOutputChannels ) {
-        // Just copy the PCMs
-        pOuputBuffer->m_bufferSize = (M4OSA_UInt32)buffer->range_length();
-        memcpy((void *)pOuputBuffer->m_dataAddress,
-            (void *)(((M4OSA_MemAddr8)buffer->data())+buffer->range_offset()),
-            buffer->range_length());
-    } else if( pDecoderContext->mAudioStreamHandler->m_nbChannels <
-        (M4OSA_UInt32)pDecoderContext->mNbOutputChannels ) {
-        // The decoder forces stereo output, downsample
-        pOuputBuffer->m_bufferSize = (M4OSA_UInt32)(buffer->range_length()/2);
-        M4OSA_Int16* pDataIn  = ((M4OSA_Int16*)buffer->data()) +
-            buffer->range_offset();
-        M4OSA_Int16* pDataOut = (M4OSA_Int16*)pOuputBuffer->m_dataAddress;
-        M4OSA_Int16* pDataEnd = pDataIn + \
-            (buffer->range_length()/sizeof(M4OSA_Int16));
-        while( pDataIn < pDataEnd ) {
-            *pDataOut = *pDataIn;
-            pDataIn+=2;
-            pDataOut++;
-        }
-    } else {
-        // The decoder forces mono output, not supported
-        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
-    }
-
-cleanUp:
-    // Release the buffer
-    buffer->release();
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_processOutputBuffer no error");
-    } else {
-        pOuputBuffer->m_bufferSize = 0;
-        ALOGV("VideoEditorAudioDecoder_processOutputBuffer ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_processOutputBuffer end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_step(M4AD_Context pContext,
-        M4AD_Buffer* pInputBuffer, M4AD_Buffer* pOutputBuffer,
-        M4OSA_Bool bJump) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-    status_t result = OK;
-    MediaBuffer* outputBuffer = NULL;
-
-    ALOGV("VideoEditorAudioDecoder_step begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
-    pDecoderContext->mNbInputFrames++;
-
-    // Push the input buffer to the decoder source
-    err = VideoEditorAudioDecoder_processInputBuffer(pDecoderContext,
-        pInputBuffer);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-    // Read
-    result = pDecoderContext->mDecoder->read(&outputBuffer, NULL);
-    if (INFO_FORMAT_CHANGED == result) {
-        ALOGV("VideoEditorAudioDecoder_step: Audio decoder \
-         returned INFO_FORMAT_CHANGED");
-        CHECK(outputBuffer == NULL);
-        sp<MetaData> meta = pDecoderContext->mDecoder->getFormat();
-        int32_t sampleRate, channelCount;
-
-        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
-        CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
-        ALOGV("VideoEditorAudioDecoder_step: samplingFreq = %d", sampleRate);
-        ALOGV("VideoEditorAudioDecoder_step: channelCnt = %d", channelCount);
-        pDecoderContext->mAudioStreamHandler->m_samplingFrequency =
-         (uint32_t)sampleRate;
-        pDecoderContext->mAudioStreamHandler->m_nbChannels =
-         (uint32_t)channelCount;
-        pDecoderContext->mNbOutputChannels = channelCount;
-
-        return M4WAR_INFO_FORMAT_CHANGE;
-    } else if (ERROR_END_OF_STREAM == result) {
-        ALOGV("VideoEditorAudioDecoder_step: Audio decoder \
-         returned ERROR_END_OF_STREAM");
-        pDecoderContext->readerErrCode = M4WAR_NO_MORE_AU;
-        return M4WAR_NO_MORE_AU;
-    } else if (OK != result) {
-        return M4ERR_STATE;
-    }
-
-    // Convert the PCM buffer
-    err = VideoEditorAudioDecoder_processOutputBuffer(pDecoderContext,
-        outputBuffer, pOutputBuffer);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_step no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_step ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_step end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_getVersion(M4_VersionInfo* pVersionInfo) {
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditorAudioDecoder_getVersion begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pVersionInfo, M4ERR_PARAMETER);
-
-    pVersionInfo->m_major      = VIDEOEDITOR_AUDIO_DECODER_VERSION_MAJOR;
-    pVersionInfo->m_minor      = VIDEOEDITOR_AUDIO_DECODER_VERSION_MINOR;
-    pVersionInfo->m_revision   = VIDEOEDITOR_AUDIO_DECODER_VERSION_REV;
-    pVersionInfo->m_structSize = sizeof(M4_VersionInfo);
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_getVersion no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_getVersion ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_getVersion end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_setOption(M4AD_Context pContext,
-        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorAudioDecoder_setOption begin 0x%X", optionID);
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
-
-    switch( optionID ) {
-        case static_cast<M4OSA_UInt32>(M4AD_kOptionID_UserParam):
-            ALOGV("VideoEditorAudioDecodersetOption UserParam is not supported");
-            err = M4ERR_NOT_IMPLEMENTED;
-            break;
-
-        case M4AD_kOptionID_3gpReaderInterface:
-            ALOGV("VideoEditorAudioDecodersetOption 3gpReaderInterface");
-            pDecoderContext->m_pReader =
-             (M4READER_DataInterface *)optionValue;
-            break;
-
-        case M4AD_kOptionID_AudioAU:
-            ALOGV("VideoEditorAudioDecodersetOption AudioAU");
-            pDecoderContext->m_pNextAccessUnitToDecode =
-             (M4_AccessUnit *)optionValue;
-            break;
-
-        default:
-            ALOGV("VideoEditorAudioDecoder_setOption  unsupported optionId 0x%X",
-                optionID);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
-            break;
-    }
-
-cleanUp:
-    if( ((M4OSA_UInt32)M4NO_ERROR == err) || ((M4OSA_UInt32)M4ERR_NOT_IMPLEMENTED == err) ) {
-        ALOGV("VideoEditorAudioDecoder_setOption error 0x%X", err);
-    } else {
-        ALOGV("VideoEditorAudioDecoder_setOption ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_setOption end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_getOption(M4AD_Context pContext,
-        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorAudioDecoder_getOption begin: optionID 0x%X", optionID);
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
-
-    switch( optionID ) {
-
-        case M4AD_kOptionID_GetAudioAUErrCode:
-            *(uint32_t *)optionValue = pDecoderContext->readerErrCode;
-            break;
-
-        case M4AD_kOptionID_AudioNbChannels:
-            *(uint32_t *)optionValue =
-             pDecoderContext->mAudioStreamHandler->m_nbChannels;
-            break;
-
-        case M4AD_kOptionID_AudioSampFrequency:
-            *(uint32_t *)optionValue =
-             pDecoderContext->mAudioStreamHandler->m_samplingFrequency;
-            break;
-
-        case M4AD_kOptionID_AuCTS:
-            *(uint32_t *)optionValue = pDecoderContext->timeStampMs;
-            break;
-
-        default:
-            ALOGV("VideoEditorAudioDecoder_getOption unsupported optionId 0x%X",
-                optionID);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
-            break;
-    }
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_getOption no error");
-    } else {
-        ALOGV("VideoEditorAudioDecoder_getOption ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_getOption end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface(M4AD_Type decoderType,
-        M4AD_Type* pDecoderType, M4AD_Interface** pDecoderInterface) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pDecoderType, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pDecoderInterface, M4ERR_PARAMETER);
-
-    ALOGV("VideoEditorAudioDecoder_getInterface begin %d 0x%x 0x%x",
-        decoderType, pDecoderType, pDecoderInterface);
-
-    SAFE_MALLOC(*pDecoderInterface, M4AD_Interface, 1,
-        "VideoEditorAudioDecoder");
-
-    *pDecoderType = decoderType;
-
-    switch( decoderType ) {
-        case M4AD_kTypeAMRNB:
-            (*pDecoderInterface)->m_pFctCreateAudioDec =
-                VideoEditorAudioDecoder_create_AMRNB;
-            break;
-        case M4AD_kTypeAMRWB:
-            (*pDecoderInterface)->m_pFctCreateAudioDec =
-                VideoEditorAudioDecoder_create_AMRWB;
-            break;
-        case M4AD_kTypeAAC:
-            (*pDecoderInterface)->m_pFctCreateAudioDec =
-                VideoEditorAudioDecoder_create_AAC;
-            break;
-        case M4AD_kTypeMP3:
-            (*pDecoderInterface)->m_pFctCreateAudioDec =
-                VideoEditorAudioDecoder_create_MP3;
-            break;
-        default:
-            ALOGV("VEAD_getInterface ERROR: unsupported type %d", decoderType);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
-        break;
-    }
-    (*pDecoderInterface)->m_pFctDestroyAudioDec   =
-        VideoEditorAudioDecoder_destroy;
-    (*pDecoderInterface)->m_pFctResetAudioDec     = M4OSA_NULL;
-    (*pDecoderInterface)->m_pFctStartAudioDec     = M4OSA_NULL;
-    (*pDecoderInterface)->m_pFctStepAudioDec      =
-        VideoEditorAudioDecoder_step;
-    (*pDecoderInterface)->m_pFctGetVersionAudioDec =
-        VideoEditorAudioDecoder_getVersion;
-    (*pDecoderInterface)->m_pFctSetOptionAudioDec =
-        VideoEditorAudioDecoder_setOption;
-    (*pDecoderInterface)->m_pFctGetOptionAudioDec =
-        VideoEditorAudioDecoder_getOption;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioDecoder_getInterface no error");
-    } else {
-        *pDecoderInterface = M4OSA_NULL;
-        ALOGV("VideoEditorAudioDecoder_getInterface ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioDecoder_getInterface end");
-    return err;
-}
-
-
-extern "C" {
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_AAC(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface) {
-    ALOGV("TEST: AAC VideoEditorAudioDecoder_getInterface no error");
-    return VideoEditorAudioDecoder_getInterface(
-        M4AD_kTypeAAC, pDecoderType, pDecoderInterface);
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRNB(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface) {
-    ALOGV("TEST: AMR VideoEditorAudioDecoder_getInterface no error");
-    return VideoEditorAudioDecoder_getInterface(
-        M4AD_kTypeAMRNB, pDecoderType, pDecoderInterface);
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRWB(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface) {
-
-    return VideoEditorAudioDecoder_getInterface(
-        M4AD_kTypeAMRWB, pDecoderType, pDecoderInterface);
-}
-
-M4OSA_ERR VideoEditorAudioDecoder_getInterface_MP3(M4AD_Type* pDecoderType,
-        M4AD_Interface** pDecoderInterface) {
-
-    return VideoEditorAudioDecoder_getInterface(
-        M4AD_kTypeMP3, pDecoderType, pDecoderInterface);
-}
-
-}  // extern "C"
-
-}  // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp
deleted file mode 100755
index a91f3ee..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorAudioEncoder.cpp
-* @brief  StageFright shell Audio Encoder
-*************************************************************************
-*/
-
-#define LOG_NDEBUG 1
-#define LOG_TAG "VIDEOEDITOR_AUDIOENCODER"
-
-#include "M4OSA_Debug.h"
-#include "VideoEditorAudioEncoder.h"
-#include "VideoEditorUtils.h"
-
-#include "utils/Log.h"
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
-
-/*** DEFINITIONS ***/
-// Force using software encoder as engine does not support prefetch
-#define VIDEOEDITOR_FORCECODEC kSoftwareCodecsOnly
-
-namespace android {
-struct VideoEditorAudioEncoderSource : public MediaSource {
-    public:
-        static sp<VideoEditorAudioEncoderSource> Create(
-            const sp<MetaData> &format);
-        virtual status_t start(MetaData *params = NULL);
-        virtual status_t stop();
-        virtual sp<MetaData> getFormat();
-        virtual status_t read(MediaBuffer **buffer,
-        const ReadOptions *options = NULL);
-        virtual int32_t storeBuffer(MediaBuffer *buffer);
-
-    protected:
-        virtual ~VideoEditorAudioEncoderSource();
-
-    private:
-        struct MediaBufferChain {
-            MediaBuffer* buffer;
-            MediaBufferChain* nextLink;
-        };
-        enum State {
-            CREATED,
-            STARTED,
-            ERROR
-        };
-
-        MediaBufferChain* mFirstBufferLink;
-        MediaBufferChain* mLastBufferLink;
-        int32_t mNbBuffer;
-        State mState;
-        sp<MetaData> mEncFormat;
-
-        VideoEditorAudioEncoderSource(const sp<MetaData> &format);
-
-        // Don't call me.
-        VideoEditorAudioEncoderSource(const VideoEditorAudioEncoderSource&);
-        VideoEditorAudioEncoderSource& operator=(
-            const VideoEditorAudioEncoderSource&);
-};
-
-sp<VideoEditorAudioEncoderSource> VideoEditorAudioEncoderSource::Create(
-    const sp<MetaData> &format) {
-
-    ALOGV("VideoEditorAudioEncoderSource::Create");
-    sp<VideoEditorAudioEncoderSource> aSource =
-        new VideoEditorAudioEncoderSource(format);
-
-    return aSource;
-}
-
-VideoEditorAudioEncoderSource::VideoEditorAudioEncoderSource(
-    const sp<MetaData> &format):
-        mFirstBufferLink(NULL),
-        mLastBufferLink(NULL),
-        mNbBuffer(0),
-        mState(CREATED),
-        mEncFormat(format) {
-    ALOGV("VideoEditorAudioEncoderSource::VideoEditorAudioEncoderSource");
-}
-
-
-VideoEditorAudioEncoderSource::~VideoEditorAudioEncoderSource() {
-    ALOGV("VideoEditorAudioEncoderSource::~VideoEditorAudioEncoderSource");
-
-    if( STARTED == mState ) {
-        stop();
-    }
-}
-
-status_t VideoEditorAudioEncoderSource::start(MetaData *meta) {
-    status_t err = OK;
-
-    ALOGV("VideoEditorAudioEncoderSource::start");
-
-    if( CREATED != mState ) {
-        ALOGV("VideoEditorAudioEncoderSource::start ERROR : invalid state %d",
-            mState);
-        return UNKNOWN_ERROR;
-    }
-
-    mState = STARTED;
-
-cleanUp:
-    ALOGV("VideoEditorAudioEncoderSource::start END (0x%x)", err);
-    return err;
-}
-
-status_t VideoEditorAudioEncoderSource::stop() {
-    status_t err = OK;
-
-    ALOGV("VideoEditorAudioEncoderSource::stop");
-
-    if( STARTED != mState ) {
-        ALOGV("VideoEditorAudioEncoderSource::stop ERROR: invalid state %d",
-            mState);
-        return UNKNOWN_ERROR;
-    }
-
-    int32_t i = 0;
-    MediaBufferChain* tmpLink = NULL;
-    while( mFirstBufferLink ) {
-        i++;
-        tmpLink = mFirstBufferLink;
-        mFirstBufferLink = mFirstBufferLink->nextLink;
-        delete tmpLink;
-    }
-    ALOGV("VideoEditorAudioEncoderSource::stop : %d buffer remained", i);
-    mFirstBufferLink = NULL;
-    mLastBufferLink = NULL;
-
-    mState = CREATED;
-
-    ALOGV("VideoEditorAudioEncoderSource::stop END (0x%x)", err);
-    return err;
-}
-
-sp<MetaData> VideoEditorAudioEncoderSource::getFormat() {
-    ALOGV("VideoEditorAudioEncoderSource::getFormat");
-    return mEncFormat;
-}
-
-status_t VideoEditorAudioEncoderSource::read(MediaBuffer **buffer,
-        const ReadOptions *options) {
-    MediaSource::ReadOptions readOptions;
-    status_t err = OK;
-    MediaBufferChain* tmpLink = NULL;
-
-    ALOGV("VideoEditorAudioEncoderSource::read");
-
-    if ( STARTED != mState ) {
-        ALOGV("VideoEditorAudioEncoderSource::read ERROR : invalid state %d",
-            mState);
-        return UNKNOWN_ERROR;
-    }
-
-    if( NULL == mFirstBufferLink ) {
-        *buffer = NULL;
-        ALOGV("VideoEditorAudioEncoderSource::read : EOS");
-        return ERROR_END_OF_STREAM;
-    }
-    *buffer = mFirstBufferLink->buffer;
-
-    tmpLink = mFirstBufferLink;
-    mFirstBufferLink = mFirstBufferLink->nextLink;
-    if( NULL == mFirstBufferLink ) {
-        mLastBufferLink = NULL;
-    }
-    delete tmpLink;
-    mNbBuffer--;
-
-    ALOGV("VideoEditorAudioEncoderSource::read END (0x%x)", err);
-    return err;
-}
-
-int32_t VideoEditorAudioEncoderSource::storeBuffer(MediaBuffer *buffer) {
-    status_t err = OK;
-
-    ALOGV("VideoEditorAudioEncoderSource::storeBuffer");
-
-    MediaBufferChain* newLink = new MediaBufferChain;
-    newLink->buffer = buffer;
-    newLink->nextLink = NULL;
-    if( NULL != mLastBufferLink ) {
-        mLastBufferLink->nextLink = newLink;
-    } else {
-        mFirstBufferLink = newLink;
-    }
-    mLastBufferLink = newLink;
-    mNbBuffer++;
-
-    ALOGV("VideoEditorAudioEncoderSource::storeBuffer END");
-    return mNbBuffer;
-}
-
-/********************
- * ENGINE INTERFACE *
- ********************/
-/**
- ******************************************************************************
- * structure VideoEditorAudioEncoder_Context
- * @brief    This structure defines the context of the StageFright audio
- *           encoder shell
- ******************************************************************************
-*/
-typedef struct {
-    M4ENCODER_AudioFormat             mFormat;
-    M4ENCODER_AudioParams*            mCodecParams;
-    M4ENCODER_AudioDecSpecificInfo    mDSI;
-    sp<VideoEditorAudioEncoderSource> mEncoderSource;
-    OMXClient                         mClient;
-    sp<MediaSource>                   mEncoder;
-    uint32_t                          mNbInputFrames;
-    uint32_t                          mNbOutputFrames;
-    int64_t                           mFirstOutputCts;
-    int64_t                           mLastOutputCts;
-} VideoEditorAudioEncoder_Context;
-
-M4OSA_ERR VideoEditorAudioEncoder_cleanup(M4OSA_Context pContext) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorAudioEncoder_cleanup begin");
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-
-    SAFE_FREE(pEncoderContext->mDSI.pInfo);
-    SAFE_FREE(pEncoderContext);
-    pContext = M4OSA_NULL;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_cleanup no error");
-    } else {
-        ALOGV("VideoEditorAudioEncoder_cleanup ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_cleanup end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_init(M4ENCODER_AudioFormat format,
-        M4OSA_Context* pContext, M4OSA_Void* pUserData) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV(" VideoEditorAudioEncoder_init begin: format %d", format);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    SAFE_MALLOC(pEncoderContext, VideoEditorAudioEncoder_Context, 1,
-        "VideoEditorAudioEncoder");
-    pEncoderContext->mFormat = format;
-
-    *pContext = pEncoderContext;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_init no error");
-    } else {
-        VideoEditorAudioEncoder_cleanup(pEncoderContext);
-        *pContext = M4OSA_NULL;
-        ALOGV("VideoEditorAudioEncoder_init ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_init end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_init_AAC(M4OSA_Context* pContext,
-        M4OSA_Void* pUserData) {
-    return VideoEditorAudioEncoder_init(M4ENCODER_kAAC, pContext, pUserData);
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_init_AMRNB(M4OSA_Context* pContext,
-        M4OSA_Void* pUserData) {
-    return VideoEditorAudioEncoder_init(M4ENCODER_kAMRNB, pContext, pUserData);
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_init_MP3(M4OSA_Context* pContext,
-        M4OSA_Void* pUserData) {
-    return VideoEditorAudioEncoder_init(M4ENCODER_kMP3, pContext, pUserData);
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_close(M4OSA_Context pContext) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorAudioEncoder_close begin");
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-
-    SAFE_FREE(pEncoderContext->mCodecParams);
-
-    pEncoderContext->mEncoder->stop();
-    pEncoderContext->mEncoder.clear();
-    pEncoderContext->mClient.disconnect();
-    pEncoderContext->mEncoderSource.clear();
-
-    ALOGV("AudioEncoder_close:IN %d frames,OUT %d frames from %lld to %lld",
-        pEncoderContext->mNbInputFrames,
-        pEncoderContext->mNbOutputFrames, pEncoderContext->mFirstOutputCts,
-        pEncoderContext->mLastOutputCts);
-
-    if( pEncoderContext->mNbInputFrames != pEncoderContext->mNbInputFrames ) {
-        ALOGV("VideoEditorAudioEncoder_close:some frames were not encoded %d %d",
-            pEncoderContext->mNbInputFrames, pEncoderContext->mNbInputFrames);
-    }
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_close no error");
-    } else {
-        ALOGV("VideoEditorAudioEncoder_close ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_close begin end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_open(M4OSA_Context pContext,
-        M4ENCODER_AudioParams *pParams, M4ENCODER_AudioDecSpecificInfo *pDSI,
-        M4OSA_Context pGrabberContext) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-    status_t result = OK;
-    sp<MetaData> encoderMetadata = NULL;
-    const char* mime = NULL;
-    int32_t iNbChannel = 0;
-    uint32_t codecFlags = 0;
-
-    ALOGV("VideoEditorAudioEncoder_open begin");
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pParams,  M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pDSI,     M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-    pDSI->pInfo = M4OSA_NULL;
-    pDSI->infoSize = 0;
-
-    pEncoderContext->mNbInputFrames  = 0;
-    pEncoderContext->mNbOutputFrames = 0;
-    pEncoderContext->mFirstOutputCts = -1;
-    pEncoderContext->mLastOutputCts  = -1;
-
-    // Allocate & initialize the encoding parameters
-    ALOGV("VideoEditorAudioEncoder_open : params F=%d CN=%d BR=%d F=%d",
-        pParams->Frequency, pParams->ChannelNum, pParams->Bitrate,
-        pParams->Format);
-    SAFE_MALLOC(pEncoderContext->mCodecParams, M4ENCODER_AudioParams, 1,
-        "VIDEOEDITOR CodecParams");
-    pEncoderContext->mCodecParams->Frequency  = pParams->Frequency;
-    pEncoderContext->mCodecParams->ChannelNum = pParams->ChannelNum;
-    pEncoderContext->mCodecParams->Bitrate    = pParams->Bitrate;
-    pEncoderContext->mCodecParams->Format     = pParams->Format;
-
-    // Check output format consistency
-    VIDEOEDITOR_CHECK(pEncoderContext->mCodecParams->Format ==
-        pEncoderContext->mFormat, M4ERR_PARAMETER);
-
-    /**
-     * StageFright graph building
-     */
-    // Create the meta data for the encoder
-    encoderMetadata = new MetaData;
-    switch( pEncoderContext->mCodecParams->Format ) {
-        case M4ENCODER_kAAC:
-        {
-            mime = MEDIA_MIMETYPE_AUDIO_AAC;
-            break;
-        }
-        case M4ENCODER_kAMRNB:
-        {
-            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
-            break;
-        }
-        default:
-        {
-            VIDEOEDITOR_CHECK(!"AudioEncoder_open : incorrect input format",
-            M4ERR_PARAMETER);
-            break;
-        }
-    }
-    encoderMetadata->setCString(kKeyMIMEType, mime);
-    encoderMetadata->setInt32(kKeySampleRate,
-        (int32_t)pEncoderContext->mCodecParams->Frequency);
-    encoderMetadata->setInt32(kKeyBitRate,
-        (int32_t)pEncoderContext->mCodecParams->Bitrate);
-
-    switch( pEncoderContext->mCodecParams->ChannelNum ) {
-        case M4ENCODER_kMono:
-        {
-            iNbChannel = 1;
-            break;
-        }
-        case M4ENCODER_kStereo:
-        {
-            iNbChannel = 2;
-            break;
-        }
-        default:
-        {
-            VIDEOEDITOR_CHECK(!"AudioEncoder_open : incorrect channel number",
-                M4ERR_STATE);
-            break;
-        }
-    }
-    encoderMetadata->setInt32(kKeyChannelCount, iNbChannel);
-
-    // Create the encoder source
-    pEncoderContext->mEncoderSource = VideoEditorAudioEncoderSource::Create(
-        encoderMetadata);
-    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoderSource.get(),
-        M4ERR_STATE);
-
-    // Connect to the OMX client
-    result = pEncoderContext->mClient.connect();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    // Create the OMX codec
-#ifdef VIDEOEDITOR_FORCECODEC
-    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
-#endif /* VIDEOEDITOR_FORCECODEC */
-    // FIXME:
-    // We are moving away to use software AACEncoder and instead use OMX-based
-    // software AAC audio encoder. We want to use AACEncoder for now. After we
-    // fix the interface issue with the OMX-based AAC audio encoder, we should
-    // then set the component name back to NULL to allow the system to pick up
-    // the right AAC audio encoder.
-    pEncoderContext->mEncoder = OMXCodec::Create(
-            pEncoderContext->mClient.interface(), encoderMetadata, true,
-            pEncoderContext->mEncoderSource, "AACEncoder" /* component name */,
-            codecFlags);
-    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoder.get(), M4ERR_STATE);
-
-    // Start the graph
-    result = pEncoderContext->mEncoder->start();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    // Get AAC DSI, this code can only work with software encoder
-    if( M4ENCODER_kAAC == pEncoderContext->mCodecParams->Format ) {
-        int32_t      isCodecConfig = 0;
-        MediaBuffer* buffer        = NULL;
-
-        // Read once to get the DSI
-        result = pEncoderContext->mEncoder->read(&buffer, NULL);
-        VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt32(kKeyIsCodecConfig,
-            &isCodecConfig) && isCodecConfig, M4ERR_STATE);
-
-        // Save the DSI
-        pEncoderContext->mDSI.infoSize = (M4OSA_UInt32)buffer->range_length();
-        SAFE_MALLOC(pEncoderContext->mDSI.pInfo, M4OSA_Int8,
-            pEncoderContext->mDSI.infoSize, "Encoder header");
-
-        memcpy((void *)pEncoderContext->mDSI.pInfo,
-            (void *)((M4OSA_MemAddr8)(buffer->data())+buffer->range_offset()),
-            pEncoderContext->mDSI.infoSize);
-
-        buffer->release();
-        *pDSI = pEncoderContext->mDSI;
-    }
-    ALOGV("VideoEditorAudioEncoder_open : DONE");
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_open no error");
-    } else {
-        VideoEditorAudioEncoder_close(pEncoderContext);
-        ALOGV("VideoEditorAudioEncoder_open ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_open end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_processInputBuffer(M4OSA_Context pContext,
-        M4ENCODER_AudioBuffer* pInBuffer) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-    M4OSA_Int8* pData = M4OSA_NULL;
-    MediaBuffer* buffer = NULL;
-    int32_t nbBuffer = 0;
-
-    ALOGV("VideoEditorAudioEncoder_processInputBuffer begin");
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-
-    switch( pEncoderContext->mCodecParams->ChannelNum ) {
-        case M4ENCODER_kMono:
-        case M4ENCODER_kStereo:
-            // Let the MediaBuffer own the data so we don't have to free it
-            buffer = new MediaBuffer((size_t)pInBuffer->pTableBufferSize[0]);
-            pData = (M4OSA_Int8*)buffer->data() + buffer->range_offset();
-            memcpy((void *)pData, (void *)pInBuffer->pTableBuffer[0],
-                pInBuffer->pTableBufferSize[0]);
-            break;
-        default:
-            ALOGV("VEAE_processInputBuffer unsupported channel configuration %d",
-                pEncoderContext->mCodecParams->ChannelNum);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
-            break;
-    }
-
-    ALOGV("VideoEditorAudioEncoder_processInputBuffer : store %d bytes",
-        buffer->range_length());
-    // Push the buffer to the source
-    nbBuffer = pEncoderContext->mEncoderSource->storeBuffer(buffer);
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_processInputBuffer no error");
-    } else {
-        if( NULL != buffer ) {
-            buffer->release();
-        }
-        ALOGV("VideoEditorAudioEncoder_processInputBuffer ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_processInputBuffer end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_processOutputBuffer(M4OSA_Context pContext,
-        MediaBuffer* buffer, M4ENCODER_AudioBuffer* pOutBuffer) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-    M4OSA_UInt32 Cts = 0;
-    int32_t i32Tmp = 0;
-    int64_t i64Tmp = 0;
-    status_t result = OK;
-
-    ALOGV("VideoEditorAudioEncoder_processOutputBuffer begin");
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,   M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer,     M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutBuffer, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-
-    // Process the returned AU
-    if( 0 == buffer->range_length() ) {
-        // Encoder has no data yet, nothing unusual
-        ALOGV("VideoEditorAudioEncoder_processOutputBuffer : buffer is empty");
-        pOutBuffer->pTableBufferSize[0] = 0;
-        goto cleanUp;
-    }
-    if( buffer->meta_data()->findInt32(kKeyIsCodecConfig, &i32Tmp) && i32Tmp ) {
-        /* This should not happen with software encoder,
-         * DSI was retrieved beforehand */
-        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_STATE);
-    } else {
-        // Check the CTS
-        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt64(kKeyTime, &i64Tmp),
-            M4ERR_STATE);
-        Cts = (M4OSA_Int32)(i64Tmp/1000);
-
-        pEncoderContext->mNbOutputFrames++;
-        if( 0 > pEncoderContext->mFirstOutputCts ) {
-            pEncoderContext->mFirstOutputCts = i64Tmp;
-        }
-        pEncoderContext->mLastOutputCts = i64Tmp;
-
-        // Format the AU
-        memcpy((void *)pOutBuffer->pTableBuffer[0],
-            (void *)((M4OSA_MemAddr8)(buffer->data())+buffer->range_offset()),
-            buffer->range_length());
-        pOutBuffer->pTableBufferSize[0] = (M4OSA_UInt32)buffer->range_length();
-    }
-
-cleanUp:
-    // Release the buffer
-    buffer->release();
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_processOutputBuffer no error");
-    } else {
-        ALOGV("VideoEditorAudioEncoder_processOutputBuffer ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_processOutputBuffer end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_step(M4OSA_Context pContext,
-        M4ENCODER_AudioBuffer* pInBuffer, M4ENCODER_AudioBuffer* pOutBuffer) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-    status_t result = OK;
-    MediaBuffer* buffer = NULL;
-
-    ALOGV("VideoEditorAudioEncoder_step begin");
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,   M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pInBuffer,  M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutBuffer, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-    pEncoderContext->mNbInputFrames++;
-
-    // Push the input buffer to the encoder source
-    err = VideoEditorAudioEncoder_processInputBuffer(pEncoderContext,pInBuffer);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-    // Read
-    result = pEncoderContext->mEncoder->read(&buffer, NULL);
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    // Provide the encoded AU to the writer
-    err = VideoEditorAudioEncoder_processOutputBuffer(pEncoderContext, buffer,
-        pOutBuffer);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_step no error");
-    } else {
-        ALOGV("VideoEditorAudioEncoder_step ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_step end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_getOption(M4OSA_Context pContext,
-        M4OSA_OptionID optionID, M4OSA_DataOption* optionValue) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorAudioEncoder_getOption begin optionID 0x%X", optionID);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
-
-    switch( optionID ) {
-        default:
-            ALOGV("VideoEditorAudioEncoder_getOption: unsupported optionId 0x%X",
-                optionID);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
-            break;
-    }
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_getOption no error");
-    } else {
-        ALOGV("VideoEditorAudioEncoder_getOption ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorAudioEncoder_getOption end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface(
-        M4ENCODER_AudioFormat format, M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
-    M4OSA_ERR err = M4NO_ERROR;
-
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pFormat,           M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pEncoderInterface, M4ERR_PARAMETER);
-
-    ALOGV("VideoEditorAudioEncoder_getInterface 0x%x 0x%x",pFormat,
-        pEncoderInterface);
-    SAFE_MALLOC(*pEncoderInterface, M4ENCODER_AudioGlobalInterface, 1,
-        "AudioEncoder");
-
-    *pFormat = format;
-
-    switch( format ) {
-        case M4ENCODER_kAAC:
-        {
-            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_AAC;
-            break;
-        }
-        case M4ENCODER_kAMRNB:
-        {
-            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_AMRNB;
-            break;
-        }
-        case M4ENCODER_kMP3:
-        {
-            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_MP3;
-            break;
-        }
-        default:
-        {
-            ALOGV("VideoEditorAudioEncoder_getInterface: unsupported format %d",
-                format);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
-        break;
-        }
-    }
-    (*pEncoderInterface)->pFctCleanUp      = VideoEditorAudioEncoder_cleanup;
-    (*pEncoderInterface)->pFctOpen         = VideoEditorAudioEncoder_open;
-    (*pEncoderInterface)->pFctClose        = VideoEditorAudioEncoder_close;
-    (*pEncoderInterface)->pFctStep         = VideoEditorAudioEncoder_step;
-    (*pEncoderInterface)->pFctGetOption    = VideoEditorAudioEncoder_getOption;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorAudioEncoder_getInterface no error");
-    } else {
-        *pEncoderInterface = M4OSA_NULL;
-        ALOGV("VideoEditorAudioEncoder_getInterface ERROR 0x%X", err);
-    }
-    return err;
-}
-extern "C" {
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface_AAC(
-        M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
-    return VideoEditorAudioEncoder_getInterface(
-        M4ENCODER_kAAC, pFormat, pEncoderInterface);
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface_AMRNB(
-        M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
-
-    return VideoEditorAudioEncoder_getInterface(
-        M4ENCODER_kAMRNB, pFormat, pEncoderInterface);
-}
-
-M4OSA_ERR VideoEditorAudioEncoder_getInterface_MP3(
-        M4ENCODER_AudioFormat* pFormat,
-        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
-    ALOGV("VideoEditorAudioEncoder_getInterface_MP3 no error");
-
-    return VideoEditorAudioEncoder_getInterface(
-        M4ENCODER_kMP3, pFormat, pEncoderInterface);
-}
-
-}  // extern "C"
-
-}  // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c b/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c
deleted file mode 100755
index 5a7b28e..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorBuffer.c
-* @brief  StageFright shell Buffer
-*************************************************************************
-*/
-#undef M4OSA_TRACE_LEVEL
-#define M4OSA_TRACE_LEVEL 1
-
-#include <inttypes.h>
-
-#include "VideoEditorBuffer.h"
-#include "utils/Log.h"
-
-#define VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE 40
-
-#define VIDEOEDITOR_SAFE_FREE(p) \
-{ \
-    if(M4OSA_NULL != p) \
-    { \
-        free(p); \
-        p = M4OSA_NULL; \
-    } \
-}
-
-/**
- ************************************************************************
- M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
- *                                         M4OSA_UInt32 nbBuffers)
- * @brief   Allocate a pool of nbBuffers buffers
- *
- * @param   ppool      : IN The buffer pool to create
- * @param   nbBuffers  : IN The number of buffers in the pool
- * @param   poolName   : IN a name given to the pool
- * @return  Error code
- ************************************************************************
-*/
-M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
-        M4OSA_UInt32 nbBuffers, M4OSA_Char* poolName)
-{
-    M4OSA_ERR lerr = M4NO_ERROR;
-    VIDEOEDITOR_BUFFER_Pool* pool;
-    M4OSA_UInt32 index;
-
-    ALOGV("VIDEOEDITOR_BUFFER_allocatePool : ppool = %p nbBuffers = %" PRIu32,
-        ppool, nbBuffers);
-
-    pool = M4OSA_NULL;
-    pool = (VIDEOEDITOR_BUFFER_Pool*)M4OSA_32bitAlignedMalloc(
-            sizeof(VIDEOEDITOR_BUFFER_Pool), VIDEOEDITOR_BUFFER_EXTERNAL,
-            (M4OSA_Char*)("VIDEOEDITOR_BUFFER_allocatePool: pool"));
-    if (M4OSA_NULL == pool)
-    {
-        lerr = M4ERR_ALLOC;
-        goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
-    }
-
-    ALOGV("VIDEOEDITOR_BUFFER_allocatePool : Allocating Pool buffers");
-    pool->pNXPBuffer = M4OSA_NULL;
-    pool->pNXPBuffer = (VIDEOEDITOR_BUFFER_Buffer*)M4OSA_32bitAlignedMalloc(
-                            sizeof(VIDEOEDITOR_BUFFER_Buffer)*nbBuffers,
-                            VIDEOEDITOR_BUFFER_EXTERNAL,
-                            (M4OSA_Char*)("BUFFER_allocatePool: pNXPBuffer"));
-    if(M4OSA_NULL == pool->pNXPBuffer)
-    {
-        lerr = M4ERR_ALLOC;
-        goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
-    }
-
-    for (index = 0; index < nbBuffers; index++)
-    {
-        pool->pNXPBuffer[index].pData = M4OSA_NULL;
-    }
-
-    ALOGV("VIDEOEDITOR_BUFFER_allocatePool : Allocating Pool name buffer");
-    pool->poolName = M4OSA_NULL;
-    pool->poolName = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(
-        VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE,VIDEOEDITOR_BUFFER_EXTERNAL,
-        (M4OSA_Char*)("VIDEOEDITOR_BUFFER_allocatePool: poolname"));
-    if(pool->poolName == M4OSA_NULL)
-    {
-        lerr = M4ERR_ALLOC;
-        goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
-    }
-
-    ALOGV("VIDEOEDITOR_BUFFER_allocatePool : Assigning Pool name buffer");
-
-    memset((void *)pool->poolName, 0,VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE);
-    memcpy((void *)pool->poolName, (void *)poolName,
-        VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE-1);
-
-    pool->NB = nbBuffers;
-
-VIDEOEDITOR_BUFFER_allocatePool_Cleanup:
-    if(M4NO_ERROR != lerr)
-    {
-        VIDEOEDITOR_SAFE_FREE(pool->pNXPBuffer);
-        VIDEOEDITOR_SAFE_FREE(pool->poolName);
-        VIDEOEDITOR_SAFE_FREE(pool);
-    }
-    *ppool = pool;
-    ALOGV("VIDEOEDITOR_BUFFER_allocatePool END");
-
-    return lerr;
-}
-
-/**
- ************************************************************************
- M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
- * @brief   Deallocate a buffer pool
- *
- * @param   ppool      : IN The buffer pool to free
- * @return  Error code
- ************************************************************************
-*/
-M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
-{
-    M4OSA_ERR err;
-    M4OSA_UInt32  j = 0;
-
-    ALOGV("VIDEOEDITOR_BUFFER_freePool : ppool = %p", ppool);
-
-    err = M4NO_ERROR;
-
-    for (j = 0; j < ppool->NB; j++)
-    {
-        if(M4OSA_NULL != ppool->pNXPBuffer[j].pData)
-        {
-            free(ppool->pNXPBuffer[j].pData);
-            ppool->pNXPBuffer[j].pData = M4OSA_NULL;
-        }
-    }
-
-    if(ppool != M4OSA_NULL)
-    {
-        SAFE_FREE(ppool->pNXPBuffer);
-        SAFE_FREE(ppool->poolName);
-        SAFE_FREE(ppool);
-    }
-
-    return(err);
-}
-
-/**
- ************************************************************************
- M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
- *         VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
- * @brief   Returns a buffer in a given state
- *
- * @param   ppool      : IN The buffer pool
- * @param   desiredState : IN The buffer state
- * @param   pNXPBuffer : IN The selected buffer
- * @return  Error code
- ************************************************************************
-*/
-M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
-        VIDEOEDITOR_BUFFER_State desiredState,
-        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
-{
-    M4OSA_ERR err = M4NO_ERROR;
-    M4OSA_Bool bFound = M4OSA_FALSE;
-    M4OSA_UInt32 i, ibuf;
-
-    ALOGV("VIDEOEDITOR_BUFFER_getBuffer from %s in state=%d",
-        ppool->poolName, desiredState);
-
-    ibuf = 0;
-
-    for (i=0; i < ppool->NB; i++)
-    {
-        bFound = (ppool->pNXPBuffer[i].state == desiredState);
-        if (bFound)
-        {
-            ibuf = i;
-            break;
-        }
-    }
-
-    if(!bFound)
-    {
-        ALOGV("VIDEOEDITOR_BUFFER_getBuffer No buffer available in state %d",
-            desiredState);
-        *pNXPBuffer = M4OSA_NULL;
-        return M4ERR_NO_BUFFER_AVAILABLE;
-    }
-
-    /* case where a buffer has been found */
-    *pNXPBuffer = &(ppool->pNXPBuffer[ibuf]);
-
-    ALOGV("VIDEOEDITOR_BUFFER_getBuffer: idx = %" PRIu32, ibuf);
-
-    return(err);
-}
-
-M4OSA_ERR VIDEOEDITOR_BUFFER_initPoolBuffers(VIDEOEDITOR_BUFFER_Pool* pool,
-    M4OSA_UInt32 lSize)
-{
-    M4OSA_ERR     err = M4NO_ERROR;
-    M4OSA_UInt32  index, j;
-
-    /**
-     * Initialize all the buffers in the pool */
-    for(index = 0; index < pool->NB; index++)
-    {
-        pool->pNXPBuffer[index].pData = M4OSA_NULL;
-        pool->pNXPBuffer[index].pData = (M4OSA_Void*)M4OSA_32bitAlignedMalloc(
-            lSize, VIDEOEDITOR_BUFFER_EXTERNAL,
-            (M4OSA_Char*)("BUFFER_initPoolBuffers: Buffer data"));
-        if(M4OSA_NULL == pool->pNXPBuffer[index].pData)
-        {
-            for (j = 0; j < index; j++)
-            {
-                if(M4OSA_NULL != pool->pNXPBuffer[j].pData)
-                {
-                    free(pool->pNXPBuffer[j].pData);
-                    pool->pNXPBuffer[j].pData = M4OSA_NULL;
-                }
-            }
-            err = M4ERR_ALLOC;
-            return err;
-        }
-        pool->pNXPBuffer[index].size = 0;
-        pool->pNXPBuffer[index].state = VIDEOEDITOR_BUFFER_kEmpty;
-        pool->pNXPBuffer[index].idx = index;
-        pool->pNXPBuffer[index].buffCTS = -1;
-    }
-    return err;
-}
-
-M4OSA_ERR VIDEOEDITOR_BUFFER_getOldestBuffer(VIDEOEDITOR_BUFFER_Pool *pool,
-        VIDEOEDITOR_BUFFER_State desiredState,
-        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
-{
-    M4OSA_ERR     err = M4NO_ERROR;
-    M4OSA_UInt32  index, j;
-    M4_MediaTime  candidateTimeStamp = (M4_MediaTime)0x7ffffff;
-    M4OSA_Bool    bFound = M4OSA_FALSE;
-
-    *pNXPBuffer = M4OSA_NULL;
-    for(index = 0; index< pool->NB; index++)
-    {
-        if(pool->pNXPBuffer[index].state == desiredState)
-        {
-            if(pool->pNXPBuffer[index].buffCTS <= candidateTimeStamp)
-            {
-                bFound = M4OSA_TRUE;
-                candidateTimeStamp = pool->pNXPBuffer[index].buffCTS;
-                    *pNXPBuffer = &(pool->pNXPBuffer[index]);
-            }
-        }
-    }
-    if(M4OSA_FALSE == bFound)
-    {
-        ALOGV("VIDEOEDITOR_BUFFER_getOldestBuffer WARNING no buffer available");
-        err = M4ERR_NO_BUFFER_AVAILABLE;
-    }
-    return err;
-}
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp
deleted file mode 100755
index 2e0d05d..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorMp3Reader.cpp
-* @brief  StageFright shell MP3 Reader
-*************************************************************************
-*/
-#define LOG_NDEBUG 1
-#define LOG_TAG "VIDEOEDITOR_MP3READER"
-
-/**
- * HEADERS
- *
- */
-#include "M4OSA_Debug.h"
-#include "M4SYS_AccessUnit.h"
-#include "VideoEditorMp3Reader.h"
-#include "VideoEditorUtils.h"
-
-#include "utils/Log.h"
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-/**
- * SOURCE CLASS
- */
-
-namespace android {
-/**
- * ENGINE INTERFACE
- */
-
-/**
- **************************************************************************
- * structure VideoEditorMp3Reader_Context
- * @brief    This structure defines the context of the SF MP3 reader shell.
- **************************************************************************
- */
-typedef struct {
-    sp<DataSource>              mDataSource;
-    sp<MediaExtractor>          mExtractor;
-    sp<MediaSource>             mMediaSource;
-    M4_AudioStreamHandler*      mAudioStreamHandler;
-    M4SYS_AccessUnit            mAudioAu;
-    M4OSA_Time                  mMaxDuration;
-    M4OSA_UInt8                 mStreamNumber;
-    M4OSA_Bool                  mSeeking;
-    M4OSA_Time                  mSeekTime;
-    uint32_t                    mExtractorFlags;
-} VideoEditorMp3Reader_Context;
-
-/**
- ****************************************************************************
- * @brief    create an instance of the MP3 reader
- * @note     allocates the context
- *
- * @param    pContext:        (OUT)    pointer on a reader context
- *
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_ALLOC                a memory allocation has failed
- * @return    M4ERR_PARAMETER            at least one parameter is not valid
- ****************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_create(M4OSA_Context *pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorMp3Reader_Context *pReaderContext = M4OSA_NULL;
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    ALOGV("VideoEditorMp3Reader_create begin");
-
-    /* Context allocation & initialization */
-    SAFE_MALLOC(pReaderContext, VideoEditorMp3Reader_Context, 1,
-        "VideoEditorMp3Reader");
-
-    pReaderContext->mAudioStreamHandler  = M4OSA_NULL;
-    pReaderContext->mAudioAu.dataAddress = M4OSA_NULL;
-    pReaderContext->mMaxDuration = 0;
-    *pContext = pReaderContext;
-
-cleanUp:
-    if (M4NO_ERROR == err) {
-        ALOGV("VideoEditorMp3Reader_create no error");
-    } else {
-        ALOGV("VideoEditorMp3Reader_create ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorMp3Reader_create end");
-    return err;
-}
-
-/**
- *******************************************************************************
- * @brief     destroy the instance of the MP3 reader
- * @note      after this call the context is invalid
- * @param     context:        (IN)    Context of the reader
- * @return    M4NO_ERROR                 there is no error
- * @return    M4ERR_PARAMETER            The input parameter is not properly set
- *******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_destroy(M4OSA_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)pContext;
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderContext, M4ERR_PARAMETER);
-    ALOGV("VideoEditorMp3Reader_destroy begin");
-
-    SAFE_FREE(pReaderContext);
-cleanUp:
-    if (M4NO_ERROR == err) {
-        ALOGV("VideoEditorMp3Reader_destroy no error");
-    } else {
-        ALOGV("VideoEditorMp3Reader_destroy ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorMp3Reader_destroy end");
-    return err;
-}
-/**
- ******************************************************************************
- * @brief    open the reader and initializes its created instance
- * @note    this function opens the MP3 file
- * @param    context:            (IN)    Context of the reader
- * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying
- *                                       the media to open
-
- * @return    M4NO_ERROR                     there is no error
- * @return    M4ERR_PARAMETER                the context is NULL
- * @return    M4ERR_BAD_CONTEXT              provided context is not a valid one
- * @return    M4ERR_UNSUPPORTED_MEDIA_TYPE   the media is DRM protected
- ******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_open(M4OSA_Context context,
-        M4OSA_Void* pFileDescriptor){
-    VideoEditorMp3Reader_Context *pReaderContext =
-    (VideoEditorMp3Reader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditorMp3Reader_open begin");
-    /* Check function parameters*/
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext),  M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_open: invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_open: invalid pointer pFileDescriptor");
-
-    ALOGV("VideoEditorMp3Reader_open Datasource start %s",
-        (char*)pFileDescriptor);
-    pReaderContext->mDataSource = new FileSource ((char*)pFileDescriptor);
-    ALOGV("VideoEditorMp3Reader_open Datasource end");
-
-    if (pReaderContext->mDataSource == NULL) {
-        ALOGV("VideoEditorMp3Reader_open Datasource error");
-        return UNKNOWN_ERROR;
-    }
-
-    ALOGV("VideoEditorMp3Reader_open extractor start");
-    pReaderContext->mExtractor = MediaExtractor::Create(
-        pReaderContext->mDataSource,MEDIA_MIMETYPE_AUDIO_MPEG);
-    ALOGV("VideoEditorMp3Reader_open extractor end");
-
-    if (pReaderContext->mExtractor == NULL)    {
-        ALOGV("VideoEditorMp3Reader_open extractor error");
-        return UNKNOWN_ERROR;
-    }
-    pReaderContext->mStreamNumber = 0;
-
-    int32_t isDRMProtected = 0;
-    sp<MetaData> meta = pReaderContext->mExtractor->getMetaData();
-    meta->findInt32(kKeyIsDRM, &isDRMProtected);
-    if (isDRMProtected) {
-        ALOGV("VideoEditorMp3Reader_open error - DRM Protected");
-        return M4ERR_UNSUPPORTED_MEDIA_TYPE;
-    }
-
-    ALOGV("VideoEditorMp3Reader_open end");
-    return err;
-}
-/**
- **************************************************************************
- * @brief    close the reader
- * @note    this function closes the MP3 reader
- * @param    context:        (IN)      Context of the reader
- * @return    M4NO_ERROR               there is no error
- * @return    M4ERR_PARAMETER          the context is NULL
- **************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_close(M4OSA_Context context) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditorMp3Reader_close begin");
-    /* Check function parameters */
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
-            "VideoEditorMp3Reader_close: invalid context pointer");
-
-    if (pReaderContext->mAudioStreamHandler != NULL) {
-        if (M4OSA_NULL != pReaderContext->mAudioStreamHandler->\
-        m_basicProperties.m_pDecoderSpecificInfo) {
-            free(pReaderContext->mAudioStreamHandler->\
-                m_basicProperties.m_pDecoderSpecificInfo);
-            pReaderContext->mAudioStreamHandler->m_basicProperties.\
-                m_decoderSpecificInfoSize = 0;
-            pReaderContext->mAudioStreamHandler->m_basicProperties.\
-                m_pDecoderSpecificInfo = M4OSA_NULL;
-        }
-
-        /* Finally destroy the stream handler */
-        free(pReaderContext->mAudioStreamHandler);
-        pReaderContext->mAudioStreamHandler = M4OSA_NULL;
-
-        if (pReaderContext->mAudioAu.dataAddress != NULL) {
-            free(pReaderContext->mAudioAu.dataAddress);
-            pReaderContext->mAudioAu.dataAddress = NULL;
-        }
-    }
-
-    pReaderContext->mMediaSource->stop();
-    pReaderContext->mMediaSource.clear();
-    pReaderContext->mExtractor.clear();
-    pReaderContext->mDataSource.clear();
-
-    ALOGV("VideoEditorMp3Reader_close end ");
-    return err;
-}
-/**
- ******************************************************************************
- * @brief    get an option value from the reader
- * @note
- *          it allows the caller to retrieve a property value:
- *
- * @param    context:        (IN)    Context of the reader
- * @param    optionId:       (IN)    indicates the option to get
- * @param    pValue:         (OUT)   pointer to structure or value (allocated
- *                                   by user) where option is stored
- *
- * @return    M4NO_ERROR             there is no error
- * @return    M4ERR_PARAMETER        at least one parameter is not properly set
- * @return    M4ERR_BAD_OPTION_ID    when the option ID is not a valid one
- ******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_getOption(M4OSA_Context context,
-          M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditorMp3Reader_getOption begin: optionId= %d ",(int)optionId);
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
-        "invalid value pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
-        "invalid value pointer");
-
-    switch(optionId) {
-    case M4READER_kOptionID_Duration:
-        {
-            ALOGV("Mp3Reader duration=%ld",pReaderContext->mMaxDuration);
-            *(M4OSA_Time*)pValue = pReaderContext->mMaxDuration;
-        }
-        break;
-
-    case M4READER_kOptionID_Bitrate:
-        {
-            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
-            if (M4OSA_NULL != pReaderContext->mAudioStreamHandler) {
-                *pBitrate = pReaderContext->mAudioStreamHandler->\
-                    m_basicProperties.m_averageBitRate;
-            } else {
-                pBitrate = 0;
-                err = M4ERR_PARAMETER;
-            }
-        }
-        break;
-
-    case M4READER_kOptionID_Mp3Id3v1Tag:
-        break;
-
-    case M4READER_kOptionID_Mp3Id3v2Tag:
-        break;
-
-    case M4READER_kOptionID_GetMetadata:
-        break;
-
-    default :
-        {
-            ALOGV("VideoEditorMp3Reader_getOption:  M4ERR_BAD_OPTION_ID");
-            err = M4ERR_BAD_OPTION_ID;
-        }
-    }
-    ALOGV("VideoEditorMp3Reader_getOption end ");
-    return err;
-}
-/**
- ******************************************************************************
- * @brief   set an option value of the reader
- * @note
- *          it allows the caller to set a property value:
- *
- * @param   context:    (IN)        Context of the reader
- * @param   optionId:   (IN)        Identifier indicating the option to set
- * @param   pValue:     (IN)        Pointer to structure or value (allocated
- *                                  by user) where option is stored
- *
- * @return  M4NO_ERROR              There is no error
- * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
- * @return  M4ERR_STATE             State automaton is not applied
- * @return  M4ERR_PARAMETER         The option parameter is invalid
- ******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_setOption(M4OSA_Context context,
-        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditorMp3Reader_Context begin: optionId: %u Value: %p ",
-          optionId, pValue);
-
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
-        "invalid context pointer");
-    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
-        "invalid value pointer");
-
-    switch(optionId) {
-        case M4READER_kOptionID_SetOsaFileReaderFctsPtr:
-        default :
-        {
-            err = M4NO_ERROR;
-        }
-    }
-    ALOGV("VideoEditorMp3Reader_Context end ");
-    return err;
-}
-/**
- ******************************************************************************
- * @brief    jump into the stream at the specified time
- * @note
- * @param    context:      (IN)   Context of the reader
- * @param    pStreamHandler(IN)   stream description of the stream to make jump
- * @param    pTime         (I/O)IN:the time to jump to (in ms)
- *                              OUT: the time to which the stream really jumped
- * @return    M4NO_ERROR           there is no error
- * @return    M4ERR_PARAMETER      at least one parameter is not properly set
- ******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_jump(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4SYS_StreamID streamIdArray[2];
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_AccessUnit* pAu;
-    M4OSA_Time time64 = (M4OSA_Time)*pTime;
-
-    ALOGV("VideoEditorMp3Reader_jump begin");
-    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_jump: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_jump: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_jump: invalid time pointer");
-
-    if(pStreamHandler == (M4_StreamHandler*)pReaderContext->\
-        mAudioStreamHandler){
-        pAu = &pReaderContext->mAudioAu;
-    } else {
-        ALOGV("VideoEditorMp3Reader_jump: passed StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-
-    streamIdArray[0] = pStreamHandler->m_streamId;
-    streamIdArray[1] = 0;
-
-    ALOGV("VideoEditorMp3Reader_jump time ms %ld ", time64);
-
-    pAu->CTS = time64;
-    pAu->DTS = time64;
-
-    time64 = time64 * 1000; /* Convert the time into micro sec */
-    ALOGV("VideoEditorMp3Reader_jump time us %ld ", time64);
-
-    pReaderContext->mSeeking = M4OSA_TRUE;
-    pReaderContext->mSeekTime = time64;
-
-    time64 = time64 / 1000; /* Convert the time into milli sec */
-    *pTime = (M4OSA_Int32)time64;
-    ALOGV("VideoEditorMp3Reader_jump end ");
-    return err;
-}
-/**
- *******************************************************************************
- * @brief   Get the next stream found in the media file
- *
- * @param    context:        (IN)  Context of the reader
- * @param    pMediaFamily:   (OUT) pointer to a user allocated
- *                                 M4READER_MediaFamily that will be filled with
- *                                 the media family of the found stream
- * @param    pStreamHandler: (OUT) pointer to a stream handler that will be
- *                                 allocated and filled with stream description
- *
- * @return    M4NO_ERROR             there is no error
- * @return    M4WAR_NO_MORE_STREAM   no more available stream in the media
- * @return    M4ERR_PARAMETER        at least one parameter is not properly set
- *******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_getNextStream(M4OSA_Context context,
-        M4READER_MediaFamily *pMediaFamily,
-        M4_StreamHandler **pStreamHandlerParam) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_StreamID streamIdArray[2];
-    M4SYS_StreamDescription streamDesc;
-    M4_AudioStreamHandler* pAudioStreamHandler;
-    M4_StreamHandler* pStreamHandler;
-    M4OSA_UInt8 type, temp;
-    M4OSA_Bool haveAudio = M4OSA_FALSE;
-    sp<MetaData> meta = NULL;
-    int64_t Duration;
-
-    ALOGV("VideoEditorMp3Reader_getNextStream begin");
-    M4OSA_DEBUG_IF1((pReaderContext == 0),      M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_getNextStream: invalid context");
-    M4OSA_DEBUG_IF1((pMediaFamily == 0),        M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_getNextStream: invalid pointer to MediaFamily");
-    M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_getNextStream: invalid pointer to StreamHandler");
-
-    ALOGV("VideoEditorMp3Reader_getNextStream stream number = %d",
-        pReaderContext->mStreamNumber);
-    if (pReaderContext->mStreamNumber >= 1) {
-        ALOGV("VideoEditorMp3Reader_getNextStream max number of stream reached");
-        return M4WAR_NO_MORE_STREAM;
-    }
-    pReaderContext->mStreamNumber = pReaderContext->mStreamNumber + 1;
-    ALOGV("VideoEditorMp3Reader_getNextStream number of Tracks%d",
-        pReaderContext->mExtractor->countTracks());
-    for (temp = 0; temp < pReaderContext->mExtractor->countTracks(); temp++) {
-        meta = pReaderContext->mExtractor->getTrackMetaData(temp);
-        const char *mime;
-        CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-        if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
-            pReaderContext->mMediaSource =
-                pReaderContext->mExtractor->getTrack(temp);
-            pReaderContext->mMediaSource->start();
-            haveAudio = true;
-        }
-
-        if (haveAudio) {
-            break;
-        }
-    }
-
-    if (!haveAudio) {
-        ALOGV("VideoEditorMp3Reader_getNextStream no more stream ");
-        pReaderContext->mDataSource.clear();
-        return M4WAR_NO_MORE_STREAM;
-    }
-
-    pReaderContext->mExtractorFlags = pReaderContext->mExtractor->flags();
-    *pMediaFamily = M4READER_kMediaFamilyAudio;
-
-    streamDesc.duration = meta->findInt64(kKeyDuration, &Duration);
-    streamDesc.duration = (M4OSA_Time)Duration/1000;
-
-    meta->findInt32(kKeyBitRate, (int32_t*)&streamDesc.averageBitrate);
-    meta->findInt32(kKeySampleRate, (int32_t*)&streamDesc.timeScale);
-    ALOGV("Bitrate = %d, SampleRate = %d duration = %lld",
-        streamDesc.averageBitrate,streamDesc.timeScale,Duration/1000);
-
-    streamDesc.streamType = M4SYS_kMP3;
-    streamDesc.profileLevel = 0xFF ;
-    streamDesc.streamID = pReaderContext->mStreamNumber;
-    streamDesc.decoderSpecificInfo = M4OSA_NULL;
-    streamDesc.decoderSpecificInfoSize = 0;
-    streamDesc.maxBitrate = streamDesc.averageBitrate;
-
-    /*    Allocate the audio stream handler and set its parameters    */
-    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc(
-        sizeof(M4_AudioStreamHandler), M4READER_MP3,
-        (M4OSA_Char*)"M4_AudioStreamHandler");
-
-    if (pAudioStreamHandler == M4OSA_NULL) {
-        ALOGV("VideoEditorMp3Reader_getNextStream malloc failed");
-        pReaderContext->mMediaSource->stop();
-        pReaderContext->mMediaSource.clear();
-        pReaderContext->mDataSource.clear();
-
-        return M4ERR_ALLOC;
-    }
-    pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
-    *pStreamHandlerParam = pStreamHandler;
-    pReaderContext->mAudioStreamHandler = pAudioStreamHandler;
-
-    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
-
-    if (meta == NULL) {
-        ALOGV("VideoEditorMp3Reader_getNextStream meta is NULL");
-    }
-
-    pAudioStreamHandler->m_samplingFrequency = streamDesc.timeScale;
-    pStreamHandler->m_pDecoderSpecificInfo =
-        (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
-    pStreamHandler->m_decoderSpecificInfoSize =
-        streamDesc.decoderSpecificInfoSize;
-
-    meta->findInt32(kKeyChannelCount,
-        (int32_t*)&pAudioStreamHandler->m_nbChannels);
-    pAudioStreamHandler->m_byteFrameLength = 1152;
-    pAudioStreamHandler->m_byteSampleSize = 2;
-
-    pStreamHandler->m_pUserData = NULL;
-    pStreamHandler->m_streamId = streamDesc.streamID;
-    pStreamHandler->m_duration = streamDesc.duration;
-    pReaderContext->mMaxDuration = streamDesc.duration;
-    pStreamHandler->m_averageBitRate = streamDesc.averageBitrate;
-
-    pStreamHandler->m_maxAUSize = 0;
-    pStreamHandler->m_streamType = M4DA_StreamTypeAudioMp3;
-
-    ALOGV("VideoEditorMp3Reader_getNextStream end ");
-    return err;
-}
-
-/**
- *******************************************************************************
- * @brief    fill the access unit structure with initialization values
- * @param    context:        (IN)     Context of the reader
- * @param    pStreamHandler: (IN)     pointer to the stream handler to which
- *                                    the access unit will be associated
- * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by
- *                                    the caller) to initialize
- * @return   M4NO_ERROR               there is no error
- * @return   M4ERR_PARAMETER          at least one parameter is not properly set
- *******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_fillAuStruct(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4SYS_AccessUnit *pAu;
-
-    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_fillAuStruct: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_fillAuStruct invalid pointer to StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_fillAuStruct: invalid pointer to M4_AccessUnit");
-
-    ALOGV("VideoEditorMp3Reader_fillAuStruct start ");
-    if(pStreamHandler == (M4_StreamHandler*)pReaderContext->\
-        mAudioStreamHandler){
-        pAu = &pReaderContext->mAudioAu;
-    } else {
-        ALOGV("VideoEditorMp3Reader_fillAuStruct StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-
-    /* Initialize pAu structure */
-    pAu->dataAddress = M4OSA_NULL;
-    pAu->size        = 0;
-    pAu->CTS         = 0;
-    pAu->DTS         = 0;
-    pAu->attribute   = 0;
-    pAu->nbFrag      = 0;
-
-    /* Initialize pAccessUnit structure */
-    pAccessUnit->m_size         = 0;
-    pAccessUnit->m_CTS          = 0;
-    pAccessUnit->m_DTS          = 0;
-    pAccessUnit->m_attribute    = 0;
-    pAccessUnit->m_dataAddress  = M4OSA_NULL;
-    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
-    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
-    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
-
-    ALOGV("VideoEditorMp3Reader_fillAuStruct end");
-    return M4NO_ERROR;
-}
-
-/**
- *******************************************************************************
- * @brief    reset the stream, i.e seek it to the beginning
- * @note
- * @param     context:          (IN)  Context of the reader
- * @param     pStreamHandler    (IN)  The stream handler of the stream to reset
- * @return    M4NO_ERROR              there is no error
- * @return    M4ERR_PARAMETER         at least one parameter is not properly set
- *******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_reset(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_StreamID streamIdArray[2];
-    M4SYS_AccessUnit* pAu;
-    M4OSA_Time time64 = 0;
-
-    ALOGV("VideoEditorMp3Reader_reset start");
-    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_reset: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_reset: invalid pointer to M4_StreamHandler");
-
-    if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
-        mAudioStreamHandler) {
-        pAu = &pReaderContext->mAudioAu;
-    } else {
-        ALOGV("VideoEditorMp3Reader_reset StreamHandler is not known");
-        return M4ERR_PARAMETER;
-    }
-    streamIdArray[0] = pStreamHandler->m_streamId;
-    streamIdArray[1] = 0;
-    pAu->CTS = time64;
-    pAu->DTS = time64;
-
-    pReaderContext->mSeeking = M4OSA_TRUE;
-    pReaderContext->mSeekTime = time64;
-
-    ALOGV("VideoEditorMp3Reader_reset end");
-    return err;
-}
-/**
- *******************************************************************************
- * @brief   Gets an access unit (AU) from the stream handler source.
- * @note    AU is the smallest possible amount of data to be decoded by decoder
- *
- * @param   context:       (IN) Context of the reader
- * @param   pStreamHandler (IN) The stream handler of the stream to make jump
- * @param   pAccessUnit    (I/O)Pointer to an access unit to fill with read data
- * @return    M4NO_ERROR        there is no error
- * @return    M4ERR_PARAMETER   at least one parameter is not properly set
- * @returns   M4ERR_ALLOC       memory allocation failed
- * @returns   M4WAR_NO_MORE_AU  there are no more access unit in the stream
- *******************************************************************************
-*/
-M4OSA_ERR VideoEditorMp3Reader_getNextAu(M4OSA_Context context,
-        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
-    VideoEditorMp3Reader_Context *pReaderContext =
-        (VideoEditorMp3Reader_Context*)context;
-    M4OSA_ERR err = M4NO_ERROR;
-    M4SYS_AccessUnit* pAu;
-    MediaBuffer *mAudioBuffer;
-    MediaSource::ReadOptions options;
-
-    ALOGV("VideoEditorMp3Reader_getNextAu start");
-    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_getNextAu: invalid context");
-    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_StreamHandler");
-    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
-        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_AccessUnit");
-
-    if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
-        mAudioStreamHandler) {
-        pAu = &pReaderContext->mAudioAu;
-    } else {
-        ALOGV("VideoEditorMp3Reader_getNextAu: StreamHandler is not known\n");
-        return M4ERR_PARAMETER;
-    }
-
-    if (pReaderContext->mSeeking) {
-        options.setSeekTo(pReaderContext->mSeekTime);
-    }
-
-    pReaderContext->mMediaSource->read(&mAudioBuffer, &options);
-
-    if (mAudioBuffer != NULL) {
-        if ((pAu->dataAddress == NULL) ||
-            (pAu->size < mAudioBuffer->range_length())) {
-            if (pAu->dataAddress != NULL) {
-                free((M4OSA_Int32*)pAu->dataAddress);
-                pAu->dataAddress = NULL;
-            }
-            pAu->dataAddress = (M4OSA_Int32*)M4OSA_32bitAlignedMalloc(
-                (mAudioBuffer->range_length() + 3) & ~0x3,
-                M4READER_MP3, (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
-
-            if (pAu->dataAddress == NULL) {
-                ALOGV("VideoEditorMp3Reader_getNextAu malloc failed");
-                pReaderContext->mMediaSource->stop();
-                pReaderContext->mMediaSource.clear();
-                pReaderContext->mDataSource.clear();
-
-                return M4ERR_ALLOC;
-            }
-        }
-        pAu->size = mAudioBuffer->range_length();
-        memcpy((M4OSA_MemAddr8)pAu->dataAddress,
-            (const char *)mAudioBuffer->data() + mAudioBuffer->range_offset(),
-            mAudioBuffer->range_length());
-
-        mAudioBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&pAu->CTS);
-
-
-        pAu->CTS = pAu->CTS / 1000; /*converting the microsec to millisec */
-        pAu->DTS  = pAu->CTS;
-        pAu->attribute = M4SYS_kFragAttrOk;
-        mAudioBuffer->release();
-
-        ALOGV("VideoEditorMp3Reader_getNextAu AU CTS = %ld",pAu->CTS);
-
-        pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
-        pAccessUnit->m_size = pAu->size;
-        pAccessUnit->m_CTS = pAu->CTS;
-        pAccessUnit->m_DTS = pAu->DTS;
-        pAccessUnit->m_attribute = pAu->attribute;
-    } else {
-        ALOGV("VideoEditorMp3Reader_getNextAu EOS reached.");
-        pAccessUnit->m_size=0;
-        err = M4WAR_NO_MORE_AU;
-    }
-    pAu->nbFrag = 0;
-
-    options.clearSeekTo();
-    pReaderContext->mSeeking = M4OSA_FALSE;
-    mAudioBuffer = NULL;
-    ALOGV("VideoEditorMp3Reader_getNextAu end");
-
-    return err;
-}
-
-extern "C" {
-
-M4OSA_ERR VideoEditorMp3Reader_getInterface(
-        M4READER_MediaType *pMediaType,
-        M4READER_GlobalInterface **pRdrGlobalInterface,
-        M4READER_DataInterface **pRdrDataInterface) {
-    M4OSA_ERR err = M4NO_ERROR;
-
-    ALOGV("VideoEditorMp3Reader_getInterface: begin");
-    /* Input parameters check */
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pMediaType,      M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrGlobalInterface, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrDataInterface, M4ERR_PARAMETER);
-
-    SAFE_MALLOC(*pRdrGlobalInterface, M4READER_GlobalInterface, 1,
-        "VideoEditorMp3Reader_getInterface");
-    SAFE_MALLOC(*pRdrDataInterface, M4READER_DataInterface, 1,
-        "VideoEditorMp3Reader_getInterface");
-
-    *pMediaType = M4READER_kMediaTypeMP3;
-
-    (*pRdrGlobalInterface)->m_pFctCreate       = VideoEditorMp3Reader_create;
-    (*pRdrGlobalInterface)->m_pFctDestroy      = VideoEditorMp3Reader_destroy;
-    (*pRdrGlobalInterface)->m_pFctOpen         = VideoEditorMp3Reader_open;
-    (*pRdrGlobalInterface)->m_pFctClose        = VideoEditorMp3Reader_close;
-    (*pRdrGlobalInterface)->m_pFctGetOption    = VideoEditorMp3Reader_getOption;
-    (*pRdrGlobalInterface)->m_pFctSetOption    = VideoEditorMp3Reader_setOption;
-    (*pRdrGlobalInterface)->m_pFctGetNextStream =
-        VideoEditorMp3Reader_getNextStream;
-    (*pRdrGlobalInterface)->m_pFctFillAuStruct =
-        VideoEditorMp3Reader_fillAuStruct;
-    (*pRdrGlobalInterface)->m_pFctStart        = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctStop         = M4OSA_NULL;
-    (*pRdrGlobalInterface)->m_pFctJump         = VideoEditorMp3Reader_jump;
-    (*pRdrGlobalInterface)->m_pFctReset        = VideoEditorMp3Reader_reset;
-    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime = M4OSA_NULL;
-
-    (*pRdrDataInterface)->m_pFctGetNextAu      = VideoEditorMp3Reader_getNextAu;
-    (*pRdrDataInterface)->m_readerContext      = M4OSA_NULL;
-
-cleanUp:
-    if( M4NO_ERROR == err )
-    {
-        ALOGV("VideoEditorMp3Reader_getInterface no error");
-    }
-    else
-    {
-        SAFE_FREE(*pRdrGlobalInterface);
-        SAFE_FREE(*pRdrDataInterface);
-
-        ALOGV("VideoEditorMp3Reader_getInterface ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorMp3Reader_getInterface: end");
-    return err;
-}
-}  /* extern "C" */
-}  /* namespace android */
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp
deleted file mode 100755
index d264a2e..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
-*************************************************************************
-* @file   VideoEditorUtils.cpp
-* @brief  StageFright shell Utilities
-*************************************************************************
-*/
-#define LOG_NDEBUG 0
-#define LOG_TAG "SF_utils"
-#include "utils/Log.h"
-
-#include "VideoEditorUtils.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXCodec.h>
-
-/* Android includes*/
-#include <utils/Log.h>
-#include <memory.h>
-
-/*---------------------*/
-/*  DEBUG LEVEL SETUP  */
-/*---------------------*/
-#define LOG1 ALOGE    /*ERRORS Logging*/
-#define LOG2 ALOGI    /*WARNING Logging*/
-#define LOG3 //ALOGV  /*COMMENTS Logging*/
-
-namespace android {
-
-void displayMetaData(const sp<MetaData> meta) {
-
-    const char* charData;
-    int32_t int32Data;
-    int64_t int64Data;
-    uint32_t type;
-    const void* data;
-    void* ptr;
-    size_t size;
-
-    if (meta->findCString(kKeyMIMEType, &charData)) {
-        LOG1("displayMetaData kKeyMIMEType %s", charData);
-    }
-    if (meta->findInt32(kKeyWidth, &int32Data)) {
-        LOG1("displayMetaData kKeyWidth %d", int32Data);
-    }
-    if (meta->findInt32(kKeyHeight, &int32Data)) {
-        LOG1("displayMetaData kKeyHeight %d", int32Data);
-    }
-    if (meta->findInt32(kKeyIFramesInterval, &int32Data)) {
-        LOG1("displayMetaData kKeyIFramesInterval %d", int32Data);
-    }
-    if (meta->findInt32(kKeyStride, &int32Data)) {
-        LOG1("displayMetaData kKeyStride %d", int32Data);
-    }
-    if (meta->findInt32(kKeySliceHeight, &int32Data)) {
-        LOG1("displayMetaData kKeySliceHeight %d", int32Data);
-    }
-    if (meta->findInt32(kKeyChannelCount, &int32Data)) {
-        LOG1("displayMetaData kKeyChannelCount %d", int32Data);
-    }
-    if (meta->findInt32(kKeySampleRate, &int32Data)) {
-        LOG1("displayMetaData kKeySampleRate %d", int32Data);
-    }
-    if (meta->findInt32(kKeyBitRate, &int32Data)) {
-        LOG1("displayMetaData kKeyBitRate %d", int32Data);
-    }
-    if (meta->findData(kKeyESDS, &type, &data, &size)) {
-        LOG1("displayMetaData kKeyESDS type=%d size=%zu", type, size);
-    }
-    if (meta->findData(kKeyAVCC, &type, &data, &size)) {
-        LOG1("displayMetaData kKeyAVCC data=0x%X type=%d size=%zu",
-            *((unsigned int*)data), type, size);
-    }
-    if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
-        LOG1("displayMetaData kKeyVorbisInfo type=%d size=%zu", type, size);
-    }
-    if (meta->findData(kKeyVorbisBooks, &type, &data, &size)) {
-        LOG1("displayMetaData kKeyVorbisBooks type=%d size=%zu", type, size);
-    }
-    if (meta->findInt32(kKeyWantsNALFragments, &int32Data)) {
-        LOG1("displayMetaData kKeyWantsNALFragments %d", int32Data);
-    }
-    if (meta->findInt32(kKeyIsSyncFrame, &int32Data)) {
-        LOG1("displayMetaData kKeyIsSyncFrame %d", int32Data);
-    }
-    if (meta->findInt32(kKeyIsCodecConfig, &int32Data)) {
-        LOG1("displayMetaData kKeyIsCodecConfig %d", int32Data);
-    }
-    if (meta->findInt64(kKeyTime, &int64Data)) {
-        LOG1("displayMetaData kKeyTime %lld", int64Data);
-    }
-    if (meta->findInt32(kKeyDuration, &int32Data)) {
-        LOG1("displayMetaData kKeyDuration %d", int32Data);
-    }
-    if (meta->findInt32(kKeyColorFormat, &int32Data)) {
-        LOG1("displayMetaData kKeyColorFormat %d", int32Data);
-    }
-    if (meta->findPointer(kKeyPlatformPrivate, &ptr)) {
-        LOG1("displayMetaData kKeyPlatformPrivate pointer=%p", ptr);
-    }
-    if (meta->findCString(kKeyDecoderComponent, &charData)) {
-        LOG1("displayMetaData kKeyDecoderComponent %s", charData);
-    }
-    if (meta->findInt32(kKeyBufferID, &int32Data)) {
-        LOG1("displayMetaData kKeyBufferID %d", int32Data);
-    }
-    if (meta->findInt32(kKeyMaxInputSize, &int32Data)) {
-        LOG1("displayMetaData kKeyMaxInputSize %d", int32Data);
-    }
-    if (meta->findInt64(kKeyThumbnailTime, &int64Data)) {
-        LOG1("displayMetaData kKeyThumbnailTime %lld", int64Data);
-    }
-    if (meta->findCString(kKeyAlbum, &charData)) {
-        LOG1("displayMetaData kKeyAlbum %s", charData);
-    }
-    if (meta->findCString(kKeyArtist, &charData)) {
-        LOG1("displayMetaData kKeyArtist %s", charData);
-    }
-    if (meta->findCString(kKeyAlbumArtist, &charData)) {
-        LOG1("displayMetaData kKeyAlbumArtist %s", charData);
-    }
-    if (meta->findCString(kKeyComposer, &charData)) {
-        LOG1("displayMetaData kKeyComposer %s", charData);
-    }
-    if (meta->findCString(kKeyGenre, &charData)) {
-        LOG1("displayMetaData kKeyGenre %s", charData);
-    }
-    if (meta->findCString(kKeyTitle, &charData)) {
-        LOG1("displayMetaData kKeyTitle %s", charData);
-    }
-    if (meta->findCString(kKeyYear, &charData)) {
-        LOG1("displayMetaData kKeyYear %s", charData);
-    }
-    if (meta->findData(kKeyAlbumArt, &type, &data, &size)) {
-        LOG1("displayMetaData kKeyAlbumArt type=%d size=%zu", type, size);
-    }
-    if (meta->findCString(kKeyAlbumArtMIME, &charData)) {
-        LOG1("displayMetaData kKeyAlbumArtMIME %s", charData);
-    }
-    if (meta->findCString(kKeyAuthor, &charData)) {
-        LOG1("displayMetaData kKeyAuthor %s", charData);
-    }
-    if (meta->findCString(kKeyCDTrackNumber, &charData)) {
-        LOG1("displayMetaData kKeyCDTrackNumber %s", charData);
-    }
-    if (meta->findCString(kKeyDiscNumber, &charData)) {
-        LOG1("displayMetaData kKeyDiscNumber %s", charData);
-    }
-    if (meta->findCString(kKeyDate, &charData)) {
-        LOG1("displayMetaData kKeyDate %s", charData);
-    }
-    if (meta->findCString(kKeyWriter, &charData)) {
-        LOG1("displayMetaData kKeyWriter %s", charData);
-    }
-    if (meta->findInt32(kKeyTimeScale, &int32Data)) {
-        LOG1("displayMetaData kKeyTimeScale %d", int32Data);
-    }
-    if (meta->findInt32(kKeyVideoProfile, &int32Data)) {
-        LOG1("displayMetaData kKeyVideoProfile %d", int32Data);
-    }
-    if (meta->findInt32(kKeyVideoLevel, &int32Data)) {
-        LOG1("displayMetaData kKeyVideoLevel %d", int32Data);
-    }
-    if (meta->findInt32(kKey64BitFileOffset, &int32Data)) {
-        LOG1("displayMetaData kKey64BitFileOffset %d", int32Data);
-    }
-    if (meta->findInt32(kKeyFileType, &int32Data)) {
-        LOG1("displayMetaData kKeyFileType %d", int32Data);
-    }
-    if (meta->findInt64(kKeyTrackTimeStatus, &int64Data)) {
-        LOG1("displayMetaData kKeyTrackTimeStatus %lld", int64Data);
-    }
-    if (meta->findInt32(kKeyRealTimeRecording, &int32Data)) {
-        LOG1("displayMetaData kKeyRealTimeRecording %d", int32Data);
-    }
-}
-
-/**
- * This code was extracted from StageFright MPEG4 writer
- * Is is used to parse and format the AVC codec specific info received
- * from StageFright encoders
- */
-static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
-static const uint8_t kNalUnitTypePicParamSet = 0x08;
-struct AVCParamSet {
-    AVCParamSet(uint16_t length, const uint8_t *data)
-        : mLength(length), mData(data) {}
-
-    uint16_t mLength;
-    const uint8_t *mData;
-};
-struct AVCCodecSpecificContext {
-    List<AVCParamSet> mSeqParamSets;
-    List<AVCParamSet> mPicParamSets;
-    uint8_t mProfileIdc;
-    uint8_t mProfileCompatible;
-    uint8_t mLevelIdc;
-};
-
-const uint8_t *parseParamSet(AVCCodecSpecificContext* pC,
-        const uint8_t *data, size_t length, int type, size_t *paramSetLen) {
-    CHECK(type == kNalUnitTypeSeqParamSet ||
-          type == kNalUnitTypePicParamSet);
-
-    size_t bytesLeft = length;
-    while (bytesLeft > 4  &&
-            memcmp("\x00\x00\x00\x01", &data[length - bytesLeft], 4)) {
-        --bytesLeft;
-    }
-    if (bytesLeft <= 4) {
-        bytesLeft = 0; // Last parameter set
-    }
-    const uint8_t *nextStartCode = &data[length - bytesLeft];
-    *paramSetLen = nextStartCode - data;
-    if (*paramSetLen == 0) {
-        ALOGE("Param set is malformed, since its length is 0");
-        return NULL;
-    }
-
-    AVCParamSet paramSet(*paramSetLen, data);
-    if (type == kNalUnitTypeSeqParamSet) {
-        if (*paramSetLen < 4) {
-            ALOGE("Seq parameter set malformed");
-            return NULL;
-        }
-        if (pC->mSeqParamSets.empty()) {
-            pC->mProfileIdc = data[1];
-            pC->mProfileCompatible = data[2];
-            pC->mLevelIdc = data[3];
-        } else {
-            if (pC->mProfileIdc != data[1] ||
-                pC->mProfileCompatible != data[2] ||
-                pC->mLevelIdc != data[3]) {
-                ALOGV("Inconsistent profile/level found in seq parameter sets");
-                return NULL;
-            }
-        }
-        pC->mSeqParamSets.push_back(paramSet);
-    } else {
-        pC->mPicParamSets.push_back(paramSet);
-    }
-    return nextStartCode;
-}
-
-status_t buildAVCCodecSpecificData(uint8_t **pOutputData, size_t *pOutputSize,
-        const uint8_t *data, size_t size, MetaData *param)
-{
-    //ALOGV("buildAVCCodecSpecificData");
-
-    if ( (pOutputData == NULL) || (pOutputSize == NULL) ) {
-        ALOGE("output is invalid");
-        return ERROR_MALFORMED;
-    }
-
-    if (*pOutputData != NULL) {
-        ALOGE("Already have codec specific data");
-        return ERROR_MALFORMED;
-    }
-
-    if (size < 4) {
-        ALOGE("Codec specific data length too short: %zu", size);
-        return ERROR_MALFORMED;
-    }
-
-    // Data is in the form of AVCCodecSpecificData
-    if (memcmp("\x00\x00\x00\x01", data, 4)) {
-        // 2 bytes for each of the parameter set length field
-        // plus the 7 bytes for the header
-        if (size < 4 + 7) {
-            ALOGE("Codec specific data length too short: %zu", size);
-            return ERROR_MALFORMED;
-        }
-
-        *pOutputSize = size;
-        *pOutputData = (uint8_t*)malloc(size);
-        memcpy(*pOutputData, data, size);
-        return OK;
-    }
-
-    AVCCodecSpecificContext ctx;
-    uint8_t *outputData = NULL;
-    size_t outputSize = 0;
-
-    // Check if the data is valid
-    uint8_t type = kNalUnitTypeSeqParamSet;
-    bool gotSps = false;
-    bool gotPps = false;
-    const uint8_t *tmp = data;
-    const uint8_t *nextStartCode = data;
-    size_t bytesLeft = size;
-    size_t paramSetLen = 0;
-    outputSize = 0;
-    while (bytesLeft > 4 && !memcmp("\x00\x00\x00\x01", tmp, 4)) {
-        type = (*(tmp + 4)) & 0x1F;
-        if (type == kNalUnitTypeSeqParamSet) {
-            if (gotPps) {
-                ALOGE("SPS must come before PPS");
-                return ERROR_MALFORMED;
-            }
-            if (!gotSps) {
-                gotSps = true;
-            }
-            nextStartCode = parseParamSet(&ctx, tmp + 4, bytesLeft - 4, type,
-                &paramSetLen);
-        } else if (type == kNalUnitTypePicParamSet) {
-            if (!gotSps) {
-                ALOGE("SPS must come before PPS");
-                return ERROR_MALFORMED;
-            }
-            if (!gotPps) {
-                gotPps = true;
-            }
-            nextStartCode = parseParamSet(&ctx, tmp + 4, bytesLeft - 4, type,
-                &paramSetLen);
-        } else {
-            ALOGE("Only SPS and PPS Nal units are expected");
-            return ERROR_MALFORMED;
-        }
-
-        if (nextStartCode == NULL) {
-            return ERROR_MALFORMED;
-        }
-
-        // Move on to find the next parameter set
-        bytesLeft -= nextStartCode - tmp;
-        tmp = nextStartCode;
-        outputSize += (2 + paramSetLen);
-    }
-
-    {
-        // Check on the number of seq parameter sets
-        size_t nSeqParamSets = ctx.mSeqParamSets.size();
-        if (nSeqParamSets == 0) {
-            ALOGE("Cound not find sequence parameter set");
-            return ERROR_MALFORMED;
-        }
-
-        if (nSeqParamSets > 0x1F) {
-            ALOGE("Too many seq parameter sets (%zu) found", nSeqParamSets);
-            return ERROR_MALFORMED;
-        }
-    }
-
-    {
-        // Check on the number of pic parameter sets
-        size_t nPicParamSets = ctx.mPicParamSets.size();
-        if (nPicParamSets == 0) {
-            ALOGE("Cound not find picture parameter set");
-            return ERROR_MALFORMED;
-        }
-        if (nPicParamSets > 0xFF) {
-            ALOGE("Too many pic parameter sets (%zu) found", nPicParamSets);
-            return ERROR_MALFORMED;
-        }
-    }
-
-    // ISO 14496-15: AVC file format
-    outputSize += 7;  // 7 more bytes in the header
-    outputData = (uint8_t *)malloc(outputSize);
-    uint8_t *header = outputData;
-    header[0] = 1;                     // version
-    header[1] = ctx.mProfileIdc;           // profile indication
-    header[2] = ctx.mProfileCompatible;    // profile compatibility
-    header[3] = ctx.mLevelIdc;
-
-    // 6-bit '111111' followed by 2-bit to lengthSizeMinuusOne
-    int32_t use2ByteNalLength = 0;
-    if (param &&
-        param->findInt32(kKey2ByteNalLength, &use2ByteNalLength) &&
-        use2ByteNalLength) {
-        header[4] = 0xfc | 1;  // length size == 2 bytes
-    } else {
-        header[4] = 0xfc | 3;  // length size == 4 bytes
-    }
-
-    // 3-bit '111' followed by 5-bit numSequenceParameterSets
-    int nSequenceParamSets = ctx.mSeqParamSets.size();
-    header[5] = 0xe0 | nSequenceParamSets;
-    header += 6;
-    for (List<AVCParamSet>::iterator it = ctx.mSeqParamSets.begin();
-         it != ctx.mSeqParamSets.end(); ++it) {
-        // 16-bit sequence parameter set length
-        uint16_t seqParamSetLength = it->mLength;
-        header[0] = seqParamSetLength >> 8;
-        header[1] = seqParamSetLength & 0xff;
-        //ALOGE("### SPS %d %d %d", seqParamSetLength, header[0], header[1]);
-
-        // SPS NAL unit (sequence parameter length bytes)
-        memcpy(&header[2], it->mData, seqParamSetLength);
-        header += (2 + seqParamSetLength);
-    }
-
-    // 8-bit nPictureParameterSets
-    int nPictureParamSets = ctx.mPicParamSets.size();
-    header[0] = nPictureParamSets;
-    header += 1;
-    for (List<AVCParamSet>::iterator it = ctx.mPicParamSets.begin();
-         it != ctx.mPicParamSets.end(); ++it) {
-        // 16-bit picture parameter set length
-        uint16_t picParamSetLength = it->mLength;
-        header[0] = picParamSetLength >> 8;
-        header[1] = picParamSetLength & 0xff;
-//ALOGE("### PPS %d %d %d", picParamSetLength, header[0], header[1]);
-
-        // PPS Nal unit (picture parameter set length bytes)
-        memcpy(&header[2], it->mData, picParamSetLength);
-        header += (2 + picParamSetLength);
-    }
-
-    *pOutputSize = outputSize;
-    *pOutputData = outputData;
-    return OK;
-}
-}// namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
deleted file mode 100755
index de91731..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
+++ /dev/null
@@ -1,1784 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorVideoDecoder.cpp
-* @brief  StageFright shell video decoder
-*************************************************************************
-*/
-#define LOG_NDEBUG 1
-#define LOG_TAG "VIDEOEDITOR_VIDEODECODER"
-/*******************
- *     HEADERS     *
- *******************/
-
-#include "VideoEditorVideoDecoder_internal.h"
-#include "VideoEditorUtils.h"
-#include "M4VD_Tools.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaDefs.h>
-/********************
- *   DEFINITIONS    *
- ********************/
-#define MAX_DEC_BUFFERS 10
-
-/********************
- *   SOURCE CLASS   *
- ********************/
-using namespace android;
-static M4OSA_ERR copyBufferToQueue(
-    VideoEditorVideoDecoder_Context* pDecShellContext,
-    MediaBuffer* pDecodedBuffer);
-
-class VideoEditorVideoDecoderSource : public MediaSource {
-    public:
-
-        VideoEditorVideoDecoderSource(
-            const sp<MetaData> &format,
-            VIDEOEDITOR_CodecType codecType,
-            void *decoderShellContext);
-
-        virtual status_t start(MetaData *params = NULL);
-        virtual status_t stop();
-        virtual sp<MetaData> getFormat();
-        virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-    protected :
-        virtual ~VideoEditorVideoDecoderSource();
-
-    private:
-        sp<MetaData> mFormat;
-        MediaBuffer* mBuffer;
-        MediaBufferGroup* mGroup;
-        Mutex mLock;
-        VideoEditorVideoDecoder_Context* mpDecShellContext;
-        int32_t mMaxAUSize;
-        bool mStarted;
-        VIDEOEDITOR_CodecType mCodecType;
-
-        // Don't call me
-        VideoEditorVideoDecoderSource(const VideoEditorVideoDecoderSource &);
-        VideoEditorVideoDecoderSource &operator=(
-            const VideoEditorVideoDecoderSource &);
-};
-
-VideoEditorVideoDecoderSource::VideoEditorVideoDecoderSource(
-        const sp<MetaData> &format, VIDEOEDITOR_CodecType codecType,
-        void *decoderShellContext) :
-        mFormat(format),
-        mBuffer(NULL),
-        mGroup(NULL),
-        mStarted(false),
-        mCodecType(codecType) {
-    mpDecShellContext = (VideoEditorVideoDecoder_Context*) decoderShellContext;
-}
-
-VideoEditorVideoDecoderSource::~VideoEditorVideoDecoderSource() {
-    if (mStarted == true) {
-        stop();
-    }
-}
-
-status_t VideoEditorVideoDecoderSource::start(
-        MetaData *params) {
-
-    if (!mStarted) {
-        if (mFormat->findInt32(kKeyMaxInputSize, &mMaxAUSize) == false) {
-            ALOGE("Could not find kKeyMaxInputSize");
-            return ERROR_MALFORMED;
-        }
-
-        mGroup = new MediaBufferGroup;
-        if (mGroup == NULL) {
-            ALOGE("FATAL: memory limitation ! ");
-            return NO_MEMORY;
-        }
-
-        mGroup->add_buffer(new MediaBuffer(mMaxAUSize));
-
-        mStarted = true;
-    }
-    return OK;
-}
-
-status_t VideoEditorVideoDecoderSource::stop() {
-    if (mStarted) {
-        if (mBuffer != NULL) {
-
-            // FIXME:
-            // Why do we need to check on the ref count?
-            int ref_count = mBuffer->refcount();
-            ALOGV("MediaBuffer refcount is %d",ref_count);
-            for (int i = 0; i < ref_count; ++i) {
-                mBuffer->release();
-            }
-
-            mBuffer = NULL;
-        }
-        delete mGroup;
-        mGroup = NULL;
-        mStarted = false;
-    }
-    return OK;
-}
-
-sp<MetaData> VideoEditorVideoDecoderSource::getFormat() {
-    Mutex::Autolock autolock(mLock);
-
-    return mFormat;
-}
-
-status_t VideoEditorVideoDecoderSource::read(MediaBuffer** buffer_out,
-        const ReadOptions *options) {
-
-    Mutex::Autolock autolock(mLock);
-    if (options != NULL) {
-        int64_t time_us;
-        MediaSource::ReadOptions::SeekMode mode;
-        options->getSeekTo(&time_us, &mode);
-        if (mode != MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC) {
-            ALOGE("Unexpected read options");
-            return BAD_VALUE;
-        }
-
-        M4OSA_ERR err;
-        M4OSA_Int32 rapTime = time_us / 1000;
-
-        /*--- Retrieve the previous RAP time ---*/
-        err = mpDecShellContext->m_pReaderGlobal->m_pFctGetPrevRapTime(
-                  mpDecShellContext->m_pReader->m_readerContext,
-                  (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
-                  &rapTime);
-
-        if (err == M4WAR_READER_INFORMATION_NOT_PRESENT) {
-            /* No RAP table, jump backward and predecode */
-            rapTime -= 40000;
-            if(rapTime < 0) rapTime = 0;
-        } else if (err != OK) {
-            ALOGE("get rap time error = 0x%x\n", (uint32_t)err);
-            return UNKNOWN_ERROR;
-        }
-
-        err = mpDecShellContext->m_pReaderGlobal->m_pFctJump(
-                   mpDecShellContext->m_pReader->m_readerContext,
-                   (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
-                   &rapTime);
-
-        if (err != OK) {
-            ALOGE("jump err = 0x%x\n", (uint32_t)err);
-            return BAD_VALUE;
-        }
-    }
-
-    *buffer_out = NULL;
-
-    M4OSA_ERR lerr = mGroup->acquire_buffer(&mBuffer);
-    if (lerr != OK) {
-        return lerr;
-    }
-    mBuffer->meta_data()->clear();  // clear all the meta data
-
-    if (mStarted) {
-        //getNext AU from reader.
-        M4_AccessUnit* pAccessUnit = mpDecShellContext->m_pNextAccessUnitToDecode;
-        lerr = mpDecShellContext->m_pReader->m_pFctGetNextAu(
-                   mpDecShellContext->m_pReader->m_readerContext,
-                   (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
-                   pAccessUnit);
-        if (lerr == M4WAR_NO_DATA_YET || lerr == M4WAR_NO_MORE_AU) {
-            *buffer_out = NULL;
-            return ERROR_END_OF_STREAM;
-        }
-
-        //copy the reader AU buffer to mBuffer
-        M4OSA_UInt32 lSize  = (pAccessUnit->m_size > (M4OSA_UInt32)mMaxAUSize)\
-            ? (M4OSA_UInt32)mMaxAUSize : pAccessUnit->m_size;
-        memcpy((void *)mBuffer->data(),(void *)pAccessUnit->m_dataAddress,
-            lSize);
-
-        mBuffer->set_range(0, lSize);
-        int64_t frameTimeUs = (int64_t) (pAccessUnit->m_CTS * 1000);
-        mBuffer->meta_data()->setInt64(kKeyTime, frameTimeUs);
-
-        // Replace the AU start code for H264
-        if (VIDEOEDITOR_kH264VideoDec == mCodecType) {
-            uint8_t *data =(uint8_t *)mBuffer->data() + mBuffer->range_offset();
-            data[0]=0;
-            data[1]=0;
-            data[2]=0;
-            data[3]=1;
-        }
-        mBuffer->meta_data()->setInt32(kKeyIsSyncFrame,
-            (pAccessUnit->m_attribute == 0x04)? 1 : 0);
-        *buffer_out = mBuffer;
-    }
-    return OK;
-}
-
-static M4OSA_UInt32 VideoEditorVideoDecoder_GetBitsFromMemory(
-        VIDEOEDITOR_VIDEO_Bitstream_ctxt* parsingCtxt, M4OSA_UInt32 nb_bits) {
-    return (M4VD_Tools_GetBitsFromMemory((M4VS_Bitstream_ctxt*) parsingCtxt,
-            nb_bits));
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_internalParseVideoDSI(M4OSA_UInt8* pVol,
-        M4OSA_Int32 aVolSize, M4DECODER_MPEG4_DecoderConfigInfo* pDci,
-        M4DECODER_VideoSize* pVideoSize) {
-
-    VIDEOEDITOR_VIDEO_Bitstream_ctxt parsingCtxt;
-    M4OSA_UInt32 code, j;
-    M4OSA_MemAddr8 start;
-    M4OSA_UInt8 i;
-    M4OSA_UInt32 time_incr_length;
-    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
-
-    /* Parsing variables */
-    M4OSA_UInt8 video_object_layer_shape = 0;
-    M4OSA_UInt8 sprite_enable = 0;
-    M4OSA_UInt8 reduced_resolution_vop_enable = 0;
-    M4OSA_UInt8 scalability = 0;
-    M4OSA_UInt8 enhancement_type = 0;
-    M4OSA_UInt8 complexity_estimation_disable = 0;
-    M4OSA_UInt8 interlaced = 0;
-    M4OSA_UInt8 sprite_warping_points = 0;
-    M4OSA_UInt8 sprite_brightness_change = 0;
-    M4OSA_UInt8 quant_precision = 0;
-
-    /* Fill the structure with default parameters */
-    pVideoSize->m_uiWidth      = 0;
-    pVideoSize->m_uiHeight     = 0;
-
-    pDci->uiTimeScale          = 0;
-    pDci->uiProfile            = 0;
-    pDci->uiUseOfResynchMarker = 0;
-    pDci->bDataPartition       = M4OSA_FALSE;
-    pDci->bUseOfRVLC           = M4OSA_FALSE;
-
-    /* Reset the bitstream context */
-    parsingCtxt.stream_byte = 0;
-    parsingCtxt.stream_index = 8;
-    parsingCtxt.in = (M4OSA_MemAddr8) pVol;
-
-    start = (M4OSA_MemAddr8) pVol;
-
-    /* Start parsing */
-    while (parsingCtxt.in - start < aVolSize) {
-        code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt, 8);
-        if (code == 0) {
-            code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt, 8);
-            if (code == 0) {
-                code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt,8);
-                if (code == 1) {
-                    /* start code found */
-                    code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                        &parsingCtxt, 8);
-
-                    /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
-
-                    if ((code > 0x1F) && (code < 0x30)) {
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 8);
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);
-                        if (code == 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 4);
-                            vol_verid = (M4OSA_UInt8)code;
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 3);
-                        }
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 4);
-                        if (code == 15) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 16);
-                        }
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);
-                        if (code == 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 3);
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);
-                            if (code == 1) {
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 32);
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 31);
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 16);
-                            }
-                        }
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 2);
-                        /* Need to save it for vop parsing */
-                        video_object_layer_shape = (M4OSA_UInt8)code;
-
-                        if (code != 0) {
-                            return 0;    /* only rectangular case supported */
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 16);
-                        pDci->uiTimeScale = code;
-
-                        /* Computes time increment length */
-                        j    = code - 1;
-                        for (i = 0; (i < 32) && (j != 0); j >>=1) {
-                            i++;
-                        }
-                        time_incr_length = (i == 0) ? 1 : i;
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);
-                        if (code == 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, time_incr_length);
-                        }
-
-                        if(video_object_layer_shape != 1) { /* 1 = Binary */
-                            if(video_object_layer_shape == 0) {
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 13);/* Width */
-                                pVideoSize->m_uiWidth = code;
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 13);/* Height */
-                                pVideoSize->m_uiHeight = code;
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                            }
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* interlaced */
-                        interlaced = (M4OSA_UInt8)code;
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* OBMC disable */
-
-                        if(vol_verid == 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* sprite enable */
-                            sprite_enable = (M4OSA_UInt8)code;
-                        } else {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 2);/* sprite enable */
-                            sprite_enable = (M4OSA_UInt8)code;
-                        }
-                        if ((sprite_enable == 1) || (sprite_enable == 2)) {
-                            if (sprite_enable != 2) {
-
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 13);/* sprite width */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 13);/* sprite height */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 13);/* sprite l coordinate */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 13);/* sprite top coordinate */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* Marker bit */
-                            }
-
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 6);/* sprite warping points */
-                            sprite_warping_points = (M4OSA_UInt8)code;
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 2);/* sprite warping accuracy */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* sprite brightness change */
-                            sprite_brightness_change = (M4OSA_UInt8)code;
-                            if (sprite_enable != 2) {
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);
-                            }
-                        }
-                        if ((vol_verid != 1) && (video_object_layer_shape != 0)){
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* sadct disable */
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1); /* not 8 bits */
-                        if (code) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 4);/* quant precision */
-                            quant_precision = (M4OSA_UInt8)code;
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 4);/* bits per pixel */
-                        }
-
-                        /* greyscale not supported */
-                        if(video_object_layer_shape == 3) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 3);
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* quant type */
-                        if (code) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* load intra quant mat */
-                            if (code) {
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 8);/* */
-                                i    = 1;
-                                while (i < 64) {
-                                    code =
-                                        VideoEditorVideoDecoder_GetBitsFromMemory(
-                                            &parsingCtxt, 8);
-                                    if (code == 0) {
-                                        break;
-                                    }
-                                    i++;
-                                }
-                            }
-
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* load non intra quant mat */
-                            if (code) {
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 8);/* */
-                                i    = 1;
-                                while (i < 64) {
-                                    code =
-                                        VideoEditorVideoDecoder_GetBitsFromMemory(
-                                        &parsingCtxt, 8);
-                                    if (code == 0) {
-                                        break;
-                                    }
-                                    i++;
-                                }
-                            }
-                        }
-
-                        if (vol_verid != 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* quarter sample */
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* complexity estimation disable */
-                        complexity_estimation_disable = (M4OSA_UInt8)code;
-                        if (!code) {
-                            //return M4ERR_NOT_IMPLEMENTED;
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* resync marker disable */
-                        pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* data partitionned */
-                        pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
-                        if (code) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* reversible VLC */
-                            pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
-                        }
-
-                        if (vol_verid != 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* newpred */
-                            if (code) {
-                                //return M4ERR_PARAMETER;
-                            }
-
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);
-                            reduced_resolution_vop_enable = (M4OSA_UInt8)code;
-                        }
-
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* scalability */
-                        scalability = (M4OSA_UInt8)code;
-                        if (code) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* hierarchy type */
-                            b_hierarchy_type = (M4OSA_UInt8)code;
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 4);/* ref layer id */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* ref sampling direct */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 5);/* hor sampling factor N */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 5);/* hor sampling factor M */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 5);/* vert sampling factor N */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 5);/* vert sampling factor M */
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 1);/* enhancement type */
-                            enhancement_type = (M4OSA_UInt8)code;
-                            if ((!b_hierarchy_type) &&
-                                    (video_object_layer_shape == 1)) {
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* use ref shape */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 1);/* use ref texture */
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 5);
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 5);
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 5);
-                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                    &parsingCtxt, 5);
-                            }
-                        }
-                        break;
-                    }
-
-                    /* ----- 0xB0 : visual_object_sequence_start_code ----- */
-
-                    else if(code == 0xB0) {
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 8);/* profile_and_level_indication */
-                        pDci->uiProfile = (M4OSA_UInt8)code;
-                    }
-
-                    /* ----- 0xB5 : visual_object_start_code ----- */
-
-                    else if(code == 0xB5) {
-                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                            &parsingCtxt, 1);/* is object layer identifier */
-                        if (code == 1) {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 4); /* visual object verid */
-                            vol_verid = (M4OSA_UInt8)code;
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 3);
-                        } else {
-                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
-                                &parsingCtxt, 7); /* Realign on byte */
-                            vol_verid = 1;
-                        }
-                    }
-
-                    /* ----- end ----- */
-                } else {
-                    if ((code >> 2) == 0x20) {
-                        /* H263 ...-> wrong*/
-                        break;
-                    }
-                }
-            }
-        }
-    }
-    return M4NO_ERROR;
-}
-
-M4VIFI_UInt8 M4VIFI_SemiplanarYVU420toYUV420(void *user_data,
-        M4VIFI_UInt8 *inyuv, M4VIFI_ImagePlane *PlaneOut ) {
-    M4VIFI_UInt8 return_code = M4VIFI_OK;
-    M4VIFI_UInt8 *outyuv =
-        ((M4VIFI_UInt8*)&(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]));
-    int32_t width = PlaneOut[0].u_width;
-    int32_t height = PlaneOut[0].u_height;
-
-    int32_t outYsize = width * height;
-    uint32_t *outy =  (uint32_t *) outyuv;
-    uint16_t *outcb =
-        (uint16_t *) &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
-    uint16_t *outcr =
-        (uint16_t *) &(PlaneOut[2].pac_data[PlaneOut[2].u_topleft]);
-
-    /* Y copying */
-    memcpy((void *)outy, (void *)inyuv, outYsize);
-
-    /* U & V copying */
-    uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
-    for (int32_t i = height >> 1; i > 0; --i) {
-        for (int32_t j = width >> 2; j > 0; --j) {
-            uint32_t temp = *inyuv_4++;
-            uint32_t tempU = temp & 0xFF;
-            tempU = tempU | ((temp >> 8) & 0xFF00);
-
-            uint32_t tempV = (temp >> 8) & 0xFF;
-            tempV = tempV | ((temp >> 16) & 0xFF00);
-
-            // Flip U and V
-            *outcb++ = tempV;
-            *outcr++ = tempU;
-        }
-    }
-    return return_code;
-}
-void logSupportDecodersAndCapabilities(M4DECODER_VideoDecoders* decoders) {
-    VideoDecoder *pDecoder;
-    VideoComponentCapabilities *pOmxComponents = NULL;
-    VideoProfileLevel *pProfileLevel = NULL;
-    pDecoder = decoders->decoder;
-    for (size_t i = 0; i< decoders->decoderNumber; i++) {
-        ALOGV("Supported Codec[%d] :%d", i, pDecoder->codec);
-        pOmxComponents = pDecoder->component;
-        for(size_t j = 0; j <  pDecoder->componentNumber; j++) {
-           pProfileLevel = pOmxComponents->profileLevel;
-           ALOGV("-->component %d", j);
-           for(size_t k = 0; k < pOmxComponents->profileNumber; k++) {
-               ALOGV("-->profile:%ld maxLevel:%ld", pProfileLevel->mProfile,
-                   pProfileLevel->mLevel);
-               pProfileLevel++;
-           }
-           pOmxComponents++;
-        }
-        pDecoder++;
-    }
-}
-
-M4OSA_ERR queryVideoDecoderCapabilities
-    (M4DECODER_VideoDecoders** decoders) {
-    M4OSA_ERR err = M4NO_ERROR;
-    const char *kMimeTypes[] = {
-        MEDIA_MIMETYPE_VIDEO_AVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
-        MEDIA_MIMETYPE_VIDEO_H263
-    };
-
-    int32_t supportFormats = sizeof(kMimeTypes) / sizeof(kMimeTypes[0]);
-    M4DECODER_VideoDecoders *pDecoders;
-    VideoDecoder *pDecoder;
-    VideoComponentCapabilities *pOmxComponents = NULL;
-    VideoProfileLevel *pProfileLevel = NULL;
-    OMXClient client;
-    status_t status = OK;
-    SAFE_MALLOC(pDecoders, M4DECODER_VideoDecoders, 1, "VideoDecoders");
-    SAFE_MALLOC(pDecoder, VideoDecoder, supportFormats,
-        "VideoDecoder");
-    pDecoders->decoder = pDecoder;
-
-    pDecoders->decoderNumber= supportFormats;
-    status = client.connect();
-    CHECK(status == OK);
-    for (size_t k = 0; k < sizeof(kMimeTypes) / sizeof(kMimeTypes[0]);
-             ++k) {
-            Vector<CodecCapabilities> results;
-            CHECK_EQ(QueryCodecs(client.interface(), kMimeTypes[k],
-                                 true, // queryDecoders
-                                 &results), (status_t)OK);
-
-            if (results.size()) {
-                SAFE_MALLOC(pOmxComponents, VideoComponentCapabilities,
-                    results.size(), "VideoComponentCapabilities");
-                ALOGV("K=%d",k);
-                pDecoder->component = pOmxComponents;
-                pDecoder->componentNumber = results.size();
-            }
-
-            for (size_t i = 0; i < results.size(); ++i) {
-                ALOGV("  decoder '%s' supports ",
-                       results[i].mComponentName.string());
-
-                if (results[i].mProfileLevels.size() == 0) {
-                    ALOGV("NOTHING.\n");
-                    continue;
-                }
-
-#if 0
-                // FIXME:
-                // We should ignore the software codecs and make IsSoftwareCodec()
-                // part of pubic API from OMXCodec.cpp
-                if (IsSoftwareCodec(results[i].mComponentName.string())) {
-                    ALOGV("Ignore software codec %s", results[i].mComponentName.string());
-                    continue;
-                }
-#endif
-
-                // Count the supported profiles
-                int32_t profileNumber = 0;
-                int32_t profile = -1;
-                for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
-                    const CodecProfileLevel &profileLevel =
-                        results[i].mProfileLevels[j];
-                    // FIXME: assume that the profiles are ordered
-                    if (profileLevel.mProfile != profile) {
-                        profile = profileLevel.mProfile;
-                        profileNumber++;
-                    }
-                }
-                SAFE_MALLOC(pProfileLevel, VideoProfileLevel,
-                    profileNumber, "VideoProfileLevel");
-                pOmxComponents->profileLevel = pProfileLevel;
-                pOmxComponents->profileNumber = profileNumber;
-
-                // Get the max Level for each profile.
-                int32_t maxLevel = -1;
-                profile = -1;
-                profileNumber = 0;
-                for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
-                    const CodecProfileLevel &profileLevel =
-                        results[i].mProfileLevels[j];
-                    if (profile == -1 && maxLevel == -1) {
-                        profile = profileLevel.mProfile;
-                        maxLevel = profileLevel.mLevel;
-                        pProfileLevel->mProfile = profile;
-                        pProfileLevel->mLevel = maxLevel;
-                        ALOGV("%d profile: %ld, max level: %ld",
-                            __LINE__, pProfileLevel->mProfile, pProfileLevel->mLevel);
-                    }
-                    if (profileLevel.mProfile != profile) {
-                        profile = profileLevel.mProfile;
-                        maxLevel = profileLevel.mLevel;
-                        profileNumber++;
-                        pProfileLevel++;
-                        pProfileLevel->mProfile = profile;
-                        pProfileLevel->mLevel = maxLevel;
-                        ALOGV("%d profile: %ld, max level: %ld",
-                            __LINE__, pProfileLevel->mProfile, pProfileLevel->mLevel);
-                    } else if (profileLevel.mLevel > maxLevel) {
-                        maxLevel = profileLevel.mLevel;
-                        pProfileLevel->mLevel = maxLevel;
-                        ALOGV("%d profile: %ld, max level: %ld",
-                            __LINE__, pProfileLevel->mProfile, pProfileLevel->mLevel);
-                    }
-
-                }
-                pOmxComponents++;
-            }
-            if (!strcmp(MEDIA_MIMETYPE_VIDEO_AVC, kMimeTypes[k]))
-                pDecoder->codec = M4DA_StreamTypeVideoMpeg4Avc;
-            if (!strcmp(MEDIA_MIMETYPE_VIDEO_MPEG4, kMimeTypes[k]))
-                pDecoder->codec = M4DA_StreamTypeVideoMpeg4;
-            if (!strcmp(MEDIA_MIMETYPE_VIDEO_H263, kMimeTypes[k]))
-                pDecoder->codec = M4DA_StreamTypeVideoH263;
-
-            pDecoder++;
-    }
-
-    logSupportDecodersAndCapabilities(pDecoders);
-    *decoders = pDecoders;
-cleanUp:
-    return err;
-}
-/********************
- * ENGINE INTERFACE *
- ********************/
-M4OSA_ERR VideoEditorVideoDecoder_configureFromMetadata(M4OSA_Context pContext,
-        MetaData* meta) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
-    bool success = OK;
-    int32_t width = 0;
-    int32_t height = 0;
-    int32_t frameSize = 0;
-    int32_t vWidth, vHeight;
-    int32_t cropLeft, cropTop, cropRight, cropBottom;
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != meta,     M4ERR_PARAMETER);
-
-    ALOGV("VideoEditorVideoDecoder_configureFromMetadata begin");
-
-    pDecShellContext = (VideoEditorVideoDecoder_Context*)pContext;
-
-    success = meta->findInt32(kKeyWidth, &vWidth);
-    VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
-    success = meta->findInt32(kKeyHeight, &vHeight);
-    VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
-
-    ALOGV("vWidth = %d, vHeight = %d", vWidth, vHeight);
-
-    pDecShellContext->mGivenWidth = vWidth;
-    pDecShellContext->mGivenHeight = vHeight;
-
-    if (!meta->findRect(
-                kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom)) {
-
-        cropLeft = cropTop = 0;
-        cropRight = vWidth - 1;
-        cropBottom = vHeight - 1;
-
-        ALOGV("got dimensions only %d x %d", width, height);
-    } else {
-        ALOGV("got crop rect %d, %d, %d, %d",
-             cropLeft, cropTop, cropRight, cropBottom);
-    }
-
-    pDecShellContext->mCropRect.left = cropLeft;
-    pDecShellContext->mCropRect.right = cropRight;
-    pDecShellContext->mCropRect.top = cropTop;
-    pDecShellContext->mCropRect.bottom = cropBottom;
-
-    width = cropRight - cropLeft + 1;
-    height = cropBottom - cropTop + 1;
-
-    ALOGV("VideoDecoder_configureFromMetadata : W=%d H=%d", width, height);
-    VIDEOEDITOR_CHECK((0 != width) && (0 != height), M4ERR_PARAMETER);
-
-    if( (M4OSA_NULL != pDecShellContext->m_pDecBufferPool) &&
-        (pDecShellContext->m_pVideoStreamhandler->m_videoWidth  == \
-            (uint32_t)width) &&
-        (pDecShellContext->m_pVideoStreamhandler->m_videoHeight == \
-            (uint32_t)height) ) {
-        // No need to reconfigure
-        goto cleanUp;
-    }
-    ALOGV("VideoDecoder_configureFromMetadata  reset: W=%d H=%d", width, height);
-    // Update the stream handler parameters
-    pDecShellContext->m_pVideoStreamhandler->m_videoWidth  = width;
-    pDecShellContext->m_pVideoStreamhandler->m_videoHeight = height;
-    frameSize = (width * height * 3) / 2;
-
-    // Configure the buffer pool
-    if( M4OSA_NULL != pDecShellContext->m_pDecBufferPool ) {
-        ALOGV("VideoDecoder_configureFromMetadata : reset the buffer pool");
-        VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
-        pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
-    }
-    err =  VIDEOEDITOR_BUFFER_allocatePool(&pDecShellContext->m_pDecBufferPool,
-        MAX_DEC_BUFFERS, (M4OSA_Char*)"VIDEOEDITOR_DecodedBufferPool");
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    err = VIDEOEDITOR_BUFFER_initPoolBuffers(pDecShellContext->m_pDecBufferPool,
-                frameSize + pDecShellContext->mGivenWidth * 2);
-
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoDecoder_configureFromMetadata no error");
-    } else {
-        if( M4OSA_NULL != pDecShellContext->m_pDecBufferPool ) {
-            VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
-            pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
-        }
-        ALOGV("VideoEditorVideoDecoder_configureFromMetadata ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoDecoder_configureFromMetadata end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_destroy(M4OSA_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext =
-        (VideoEditorVideoDecoder_Context*)pContext;
-
-    // Input parameters check
-    ALOGV("VideoEditorVideoDecoder_destroy begin");
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    // Release the color converter
-    delete pDecShellContext->mI420ColorConverter;
-
-    // Destroy the graph
-    if( pDecShellContext->mVideoDecoder != NULL ) {
-        ALOGV("### VideoEditorVideoDecoder_destroy : releasing decoder");
-        pDecShellContext->mVideoDecoder->stop();
-        pDecShellContext->mVideoDecoder.clear();
-    }
-    pDecShellContext->mClient.disconnect();
-    pDecShellContext->mReaderSource.clear();
-
-    // Release memory
-    if( pDecShellContext->m_pDecBufferPool != M4OSA_NULL ) {
-        VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
-        pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
-    }
-    SAFE_FREE(pDecShellContext);
-    pContext = NULL;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoDecoder_destroy no error");
-    } else {
-        ALOGV("VideoEditorVideoDecoder_destroy ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoDecoder_destroy end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_create(M4OSA_Context *pContext,
-        M4_StreamHandler *pStreamHandler,
-        M4READER_GlobalInterface *pReaderGlobalInterface,
-        M4READER_DataInterface *pReaderDataInterface,
-        M4_AccessUnit *pAccessUnit, M4OSA_Void *pUserData) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
-    status_t status = OK;
-    bool success = TRUE;
-    int32_t colorFormat = 0;
-    M4OSA_UInt32 size = 0;
-    sp<MetaData> decoderMetadata = NULL;
-    int decoderOutput = OMX_COLOR_FormatYUV420Planar;
-
-    ALOGV("VideoEditorVideoDecoder_create begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,             M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler,       M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderDataInterface, M4ERR_PARAMETER);
-
-    // Context allocation & initialization
-    SAFE_MALLOC(pDecShellContext, VideoEditorVideoDecoder_Context, 1,
-        "VideoEditorVideoDecoder");
-    pDecShellContext->m_pVideoStreamhandler =
-        (M4_VideoStreamHandler*)pStreamHandler;
-    pDecShellContext->m_pNextAccessUnitToDecode = pAccessUnit;
-    pDecShellContext->m_pReaderGlobal = pReaderGlobalInterface;
-    pDecShellContext->m_pReader = pReaderDataInterface;
-    pDecShellContext->m_lastDecodedCTS = -1;
-    pDecShellContext->m_lastRenderCts = -1;
-    switch( pStreamHandler->m_streamType ) {
-        case M4DA_StreamTypeVideoH263:
-            pDecShellContext->mDecoderType = VIDEOEDITOR_kH263VideoDec;
-            break;
-        case M4DA_StreamTypeVideoMpeg4:
-            pDecShellContext->mDecoderType = VIDEOEDITOR_kMpeg4VideoDec;
-            // Parse the VOL header
-            err = VideoEditorVideoDecoder_internalParseVideoDSI(
-                (M4OSA_UInt8*)pDecShellContext->m_pVideoStreamhandler->\
-                    m_basicProperties.m_pDecoderSpecificInfo,
-                pDecShellContext->m_pVideoStreamhandler->\
-                    m_basicProperties.m_decoderSpecificInfoSize,
-                &pDecShellContext->m_Dci, &pDecShellContext->m_VideoSize);
-            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-            break;
-        case M4DA_StreamTypeVideoMpeg4Avc:
-            pDecShellContext->mDecoderType = VIDEOEDITOR_kH264VideoDec;
-            break;
-        default:
-            VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
-                M4ERR_PARAMETER);
-            break;
-    }
-
-    pDecShellContext->mNbInputFrames     = 0;
-    pDecShellContext->mFirstInputCts     = -1.0;
-    pDecShellContext->mLastInputCts      = -1.0;
-    pDecShellContext->mNbRenderedFrames  = 0;
-    pDecShellContext->mFirstRenderedCts  = -1.0;
-    pDecShellContext->mLastRenderedCts   = -1.0;
-    pDecShellContext->mNbOutputFrames    = 0;
-    pDecShellContext->mFirstOutputCts    = -1;
-    pDecShellContext->mLastOutputCts     = -1;
-    pDecShellContext->m_pDecBufferPool   = M4OSA_NULL;
-
-    // Calculate the interval between two video frames.
-    CHECK(pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate > 0);
-    pDecShellContext->mFrameIntervalMs =
-            1000.0 / pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate;
-
-    /**
-     * StageFright graph building
-     */
-    decoderMetadata = new MetaData;
-    switch( pDecShellContext->mDecoderType ) {
-        case VIDEOEDITOR_kH263VideoDec:
-            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
-            break;
-        case VIDEOEDITOR_kMpeg4VideoDec:
-            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
-            decoderMetadata->setData(kKeyESDS, kTypeESDS,
-                pStreamHandler->m_pESDSInfo,
-                pStreamHandler->m_ESDSInfoSize);
-            break;
-        case VIDEOEDITOR_kH264VideoDec:
-            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-            decoderMetadata->setData(kKeyAVCC, kTypeAVCC,
-                pStreamHandler->m_pH264DecoderSpecificInfo,
-                pStreamHandler->m_H264decoderSpecificInfoSize);
-            break;
-        default:
-            VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
-                M4ERR_PARAMETER);
-            break;
-    }
-
-    decoderMetadata->setInt32(kKeyMaxInputSize, pStreamHandler->m_maxAUSize);
-    decoderMetadata->setInt32(kKeyWidth,
-        pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
-    decoderMetadata->setInt32(kKeyHeight,
-        pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
-
-    // Create the decoder source
-    pDecShellContext->mReaderSource = new VideoEditorVideoDecoderSource(
-        decoderMetadata, pDecShellContext->mDecoderType,
-        (void *)pDecShellContext);
-    VIDEOEDITOR_CHECK(NULL != pDecShellContext->mReaderSource.get(),
-        M4ERR_SF_DECODER_RSRC_FAIL);
-
-    // Connect to the OMX client
-    status = pDecShellContext->mClient.connect();
-    VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
-
-    // Create the decoder
-    pDecShellContext->mVideoDecoder = OMXCodec::Create(
-        pDecShellContext->mClient.interface(),
-        decoderMetadata, false, pDecShellContext->mReaderSource);
-    VIDEOEDITOR_CHECK(NULL != pDecShellContext->mVideoDecoder.get(),
-        M4ERR_SF_DECODER_RSRC_FAIL);
-
-
-    // Get the output color format
-    success = pDecShellContext->mVideoDecoder->getFormat()->findInt32(
-        kKeyColorFormat, &colorFormat);
-    VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
-    pDecShellContext->decOuputColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
-
-    pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyWidth,
-        pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
-    pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyHeight,
-        pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
-
-    // Get the color converter
-    pDecShellContext->mI420ColorConverter = new I420ColorConverter;
-    if (pDecShellContext->mI420ColorConverter->isLoaded()) {
-        decoderOutput = pDecShellContext->mI420ColorConverter->getDecoderOutputFormat();
-    }
-
-    if (decoderOutput == OMX_COLOR_FormatYUV420Planar) {
-        delete pDecShellContext->mI420ColorConverter;
-        pDecShellContext->mI420ColorConverter = NULL;
-    }
-
-    ALOGI("decoder output format = 0x%X\n", decoderOutput);
-
-    // Configure the buffer pool from the metadata
-    err = VideoEditorVideoDecoder_configureFromMetadata(pDecShellContext,
-        pDecShellContext->mVideoDecoder->getFormat().get());
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-    // Start the graph
-    status = pDecShellContext->mVideoDecoder->start();
-    VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
-
-    *pContext = (M4OSA_Context)pDecShellContext;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoDecoder_create no error");
-    } else {
-        VideoEditorVideoDecoder_destroy(pDecShellContext);
-        *pContext = M4OSA_NULL;
-        ALOGV("VideoEditorVideoDecoder_create ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoDecoder_create : DONE");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoSoftwareDecoder_create(M4OSA_Context *pContext,
-        M4_StreamHandler *pStreamHandler,
-        M4READER_GlobalInterface *pReaderGlobalInterface,
-        M4READER_DataInterface *pReaderDataInterface,
-        M4_AccessUnit *pAccessUnit, M4OSA_Void *pUserData) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
-    status_t status = OK;
-    bool success = TRUE;
-    int32_t colorFormat = 0;
-    M4OSA_UInt32 size = 0;
-    sp<MetaData> decoderMetadata = NULL;
-
-    ALOGV("VideoEditorVideoDecoder_create begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,             M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler,       M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderDataInterface, M4ERR_PARAMETER);
-
-    // Context allocation & initialization
-    SAFE_MALLOC(pDecShellContext, VideoEditorVideoDecoder_Context, 1,
-        "VideoEditorVideoDecoder");
-    pDecShellContext->m_pVideoStreamhandler =
-        (M4_VideoStreamHandler*)pStreamHandler;
-    pDecShellContext->m_pNextAccessUnitToDecode = pAccessUnit;
-    pDecShellContext->m_pReaderGlobal = pReaderGlobalInterface;
-    pDecShellContext->m_pReader = pReaderDataInterface;
-    pDecShellContext->m_lastDecodedCTS = -1;
-    pDecShellContext->m_lastRenderCts = -1;
-    switch( pStreamHandler->m_streamType ) {
-        case M4DA_StreamTypeVideoH263:
-            pDecShellContext->mDecoderType = VIDEOEDITOR_kH263VideoDec;
-            break;
-        case M4DA_StreamTypeVideoMpeg4:
-            pDecShellContext->mDecoderType = VIDEOEDITOR_kMpeg4VideoDec;
-            // Parse the VOL header
-            err = VideoEditorVideoDecoder_internalParseVideoDSI(
-                (M4OSA_UInt8*)pDecShellContext->m_pVideoStreamhandler->\
-                    m_basicProperties.m_pDecoderSpecificInfo,
-                pDecShellContext->m_pVideoStreamhandler->\
-                    m_basicProperties.m_decoderSpecificInfoSize,
-                &pDecShellContext->m_Dci, &pDecShellContext->m_VideoSize);
-            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-            break;
-        case M4DA_StreamTypeVideoMpeg4Avc:
-            pDecShellContext->mDecoderType = VIDEOEDITOR_kH264VideoDec;
-            break;
-        default:
-            VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
-                M4ERR_PARAMETER);
-            break;
-    }
-
-    pDecShellContext->mNbInputFrames     = 0;
-    pDecShellContext->mFirstInputCts     = -1.0;
-    pDecShellContext->mLastInputCts      = -1.0;
-    pDecShellContext->mNbRenderedFrames  = 0;
-    pDecShellContext->mFirstRenderedCts  = -1.0;
-    pDecShellContext->mLastRenderedCts   = -1.0;
-    pDecShellContext->mNbOutputFrames    = 0;
-    pDecShellContext->mFirstOutputCts    = -1;
-    pDecShellContext->mLastOutputCts     = -1;
-    pDecShellContext->m_pDecBufferPool   = M4OSA_NULL;
-
-    // Calculate the interval between two video frames.
-    if(pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate > 0){
-        pDecShellContext->mFrameIntervalMs =
-            1000.0 / pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate;
-    }
-
-    /**
-     * StageFright graph building
-     */
-    decoderMetadata = new MetaData;
-    switch( pDecShellContext->mDecoderType ) {
-        case VIDEOEDITOR_kH263VideoDec:
-            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
-            break;
-        case VIDEOEDITOR_kMpeg4VideoDec:
-            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
-            decoderMetadata->setData(kKeyESDS, kTypeESDS,
-                pStreamHandler->m_pESDSInfo,
-                pStreamHandler->m_ESDSInfoSize);
-            break;
-        case VIDEOEDITOR_kH264VideoDec:
-            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-            decoderMetadata->setData(kKeyAVCC, kTypeAVCC,
-                pStreamHandler->m_pH264DecoderSpecificInfo,
-                pStreamHandler->m_H264decoderSpecificInfoSize);
-            break;
-        default:
-            VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
-                M4ERR_PARAMETER);
-            break;
-    }
-
-    decoderMetadata->setInt32(kKeyMaxInputSize, pStreamHandler->m_maxAUSize);
-    decoderMetadata->setInt32(kKeyWidth,
-        pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
-    decoderMetadata->setInt32(kKeyHeight,
-        pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
-
-    // Create the decoder source
-    pDecShellContext->mReaderSource = new VideoEditorVideoDecoderSource(
-        decoderMetadata, pDecShellContext->mDecoderType,
-        (void *)pDecShellContext);
-    VIDEOEDITOR_CHECK(NULL != pDecShellContext->mReaderSource.get(),
-        M4ERR_SF_DECODER_RSRC_FAIL);
-
-    // Connect to the OMX client
-    status = pDecShellContext->mClient.connect();
-    VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
-
-     ALOGI("Using software codecs only");
-    // Create the decoder
-    pDecShellContext->mVideoDecoder = OMXCodec::Create(
-        pDecShellContext->mClient.interface(),
-        decoderMetadata, false, pDecShellContext->mReaderSource,NULL,OMXCodec::kSoftwareCodecsOnly);
-    VIDEOEDITOR_CHECK(NULL != pDecShellContext->mVideoDecoder.get(),
-        M4ERR_SF_DECODER_RSRC_FAIL);
-
-    // Get the output color format
-    success = pDecShellContext->mVideoDecoder->getFormat()->findInt32(
-        kKeyColorFormat, &colorFormat);
-    VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
-    pDecShellContext->decOuputColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
-
-    pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyWidth,
-        pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
-    pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyHeight,
-        pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
-
-    // Configure the buffer pool from the metadata
-    err = VideoEditorVideoDecoder_configureFromMetadata(pDecShellContext,
-        pDecShellContext->mVideoDecoder->getFormat().get());
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-    // Start the graph
-    status = pDecShellContext->mVideoDecoder->start();
-    VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
-
-    *pContext = (M4OSA_Context)pDecShellContext;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoDecoder_create no error");
-    } else {
-        VideoEditorVideoDecoder_destroy(pDecShellContext);
-        *pContext = M4OSA_NULL;
-        ALOGV("VideoEditorVideoDecoder_create ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoDecoder_create : DONE");
-    return err;
-}
-
-
-M4OSA_ERR VideoEditorVideoDecoder_getOption(M4OSA_Context context,
-        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
-    M4OSA_ERR lerr = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext =
-        (VideoEditorVideoDecoder_Context*) context;
-    M4_VersionInfo* pVersionInfo;
-    M4DECODER_VideoSize* pVideoSize;
-    M4OSA_UInt32* pNextFrameCts;
-    M4OSA_UInt32 *plastDecodedFrameCts;
-    M4DECODER_AVCProfileLevel* profile;
-    M4DECODER_MPEG4_DecoderConfigInfo* pDecConfInfo;
-
-    ALOGV("VideoEditorVideoDecoder_getOption begin");
-
-    switch (optionId) {
-        case M4DECODER_kOptionID_AVCLastDecodedFrameCTS:
-             plastDecodedFrameCts = (M4OSA_UInt32 *) pValue;
-             *plastDecodedFrameCts = pDecShellContext->m_lastDecodedCTS;
-             break;
-
-        case M4DECODER_kOptionID_Version:
-            pVersionInfo = (M4_VersionInfo*)pValue;
-
-            pVersionInfo->m_major = VIDEOEDITOR_VIDEC_SHELL_VER_MAJOR;
-            pVersionInfo->m_minor= VIDEOEDITOR_VIDEC_SHELL_VER_MINOR;
-            pVersionInfo->m_revision = VIDEOEDITOR_VIDEC_SHELL_VER_REVISION;
-            pVersionInfo->m_structSize=sizeof(M4_VersionInfo);
-            break;
-
-        case M4DECODER_kOptionID_VideoSize:
-            /** Only VPS uses this Option ID. */
-            pVideoSize = (M4DECODER_VideoSize*)pValue;
-            pDecShellContext->mVideoDecoder->getFormat()->findInt32(kKeyWidth,
-                (int32_t*)(&pVideoSize->m_uiWidth));
-            pDecShellContext->mVideoDecoder->getFormat()->findInt32(kKeyHeight,
-                (int32_t*)(&pVideoSize->m_uiHeight));
-            ALOGV("VideoEditorVideoDecoder_getOption : W=%d H=%d",
-                pVideoSize->m_uiWidth, pVideoSize->m_uiHeight);
-            break;
-
-        case M4DECODER_kOptionID_NextRenderedFrameCTS:
-            /** How to get this information. SF decoder does not provide this. *
-            ** Let us provide last decoded frame CTS as of now. *
-            ** Only VPS uses this Option ID. */
-            pNextFrameCts = (M4OSA_UInt32 *)pValue;
-            *pNextFrameCts = pDecShellContext->m_lastDecodedCTS;
-            break;
-        case M4DECODER_MPEG4_kOptionID_DecoderConfigInfo:
-            if(pDecShellContext->mDecoderType == VIDEOEDITOR_kMpeg4VideoDec) {
-                (*(M4DECODER_MPEG4_DecoderConfigInfo*)pValue) =
-                    pDecShellContext->m_Dci;
-            }
-            break;
-        default:
-            lerr = M4ERR_BAD_OPTION_ID;
-            break;
-
-    }
-
-    ALOGV("VideoEditorVideoDecoder_getOption: end with err = 0x%x", lerr);
-    return lerr;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_setOption(M4OSA_Context context,
-        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
-    M4OSA_ERR lerr = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context *pDecShellContext =
-        (VideoEditorVideoDecoder_Context*) context;
-
-    ALOGV("VideoEditorVideoDecoder_setOption begin");
-
-    switch (optionId) {
-        case M4DECODER_kOptionID_OutputFilter: {
-                M4DECODER_OutputFilter* pOutputFilter =
-                    (M4DECODER_OutputFilter*) pValue;
-                pDecShellContext->m_pFilter =
-                    (M4VIFI_PlanConverterFunctionType*)pOutputFilter->\
-                    m_pFilterFunction;
-                pDecShellContext->m_pFilterUserData =
-                    pOutputFilter->m_pFilterUserData;
-            }
-            break;
-        case M4DECODER_kOptionID_DeblockingFilter:
-            break;
-        default:
-            lerr = M4ERR_BAD_CONTEXT;
-            break;
-    }
-
-    ALOGV("VideoEditorVideoDecoder_setOption: end with err = 0x%x", lerr);
-    return lerr;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
-        M4_MediaTime* pTime, M4OSA_Bool bJump, M4OSA_UInt32 tolerance) {
-    M4OSA_ERR lerr = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext =
-        (VideoEditorVideoDecoder_Context*) context;
-    int64_t lFrameTime;
-    MediaBuffer* pDecoderBuffer = NULL;
-    MediaBuffer* pNextBuffer = NULL;
-    status_t errStatus;
-    bool needSeek = bJump;
-
-    ALOGV("VideoEditorVideoDecoder_decode begin");
-
-    if( M4OSA_TRUE == pDecShellContext->mReachedEOS ) {
-        // Do not call read(), it could lead to a freeze
-        ALOGV("VideoEditorVideoDecoder_decode : EOS already reached");
-        lerr = M4WAR_NO_MORE_AU;
-        goto VIDEOEDITOR_VideoDecode_cleanUP;
-    }
-    if(pDecShellContext->m_lastDecodedCTS >= *pTime) {
-        ALOGV("VideoDecoder_decode: Already decoded up to this time CTS = %lf.",
-            pDecShellContext->m_lastDecodedCTS);
-        goto VIDEOEDITOR_VideoDecode_cleanUP;
-    }
-    if(M4OSA_TRUE == bJump) {
-        ALOGV("VideoEditorVideoDecoder_decode: Jump called");
-        pDecShellContext->m_lastDecodedCTS = -1;
-        pDecShellContext->m_lastRenderCts = -1;
-    }
-
-    pDecShellContext->mNbInputFrames++;
-    if (0 > pDecShellContext->mFirstInputCts){
-        pDecShellContext->mFirstInputCts = *pTime;
-    }
-    pDecShellContext->mLastInputCts = *pTime;
-
-    while (pDecoderBuffer == NULL || pDecShellContext->m_lastDecodedCTS + tolerance < *pTime) {
-        ALOGV("VideoEditorVideoDecoder_decode, frameCTS = %lf, DecodeUpTo = %lf",
-            pDecShellContext->m_lastDecodedCTS, *pTime);
-
-        // Read the buffer from the stagefright decoder
-        if (needSeek) {
-            MediaSource::ReadOptions options;
-            int64_t time_us = *pTime * 1000;
-            options.setSeekTo(time_us, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-            errStatus = pDecShellContext->mVideoDecoder->read(&pNextBuffer, &options);
-            needSeek = false;
-        } else {
-            errStatus = pDecShellContext->mVideoDecoder->read(&pNextBuffer);
-        }
-
-        // Handle EOS and format change
-        if (errStatus == ERROR_END_OF_STREAM) {
-            ALOGV("End of stream reached, returning M4WAR_NO_MORE_AU ");
-            pDecShellContext->mReachedEOS = M4OSA_TRUE;
-            lerr = M4WAR_NO_MORE_AU;
-            // If we decoded a buffer before EOS, we still need to put it
-            // into the queue.
-            if (pDecoderBuffer && bJump) {
-                copyBufferToQueue(pDecShellContext, pDecoderBuffer);
-            }
-            goto VIDEOEDITOR_VideoDecode_cleanUP;
-        } else if (INFO_FORMAT_CHANGED == errStatus) {
-            ALOGV("VideoDecoder_decode : source returns INFO_FORMAT_CHANGED");
-            lerr = VideoEditorVideoDecoder_configureFromMetadata(
-                pDecShellContext,
-                pDecShellContext->mVideoDecoder->getFormat().get());
-            if( M4NO_ERROR != lerr ) {
-                ALOGV("!!! VideoEditorVideoDecoder_decode ERROR : "
-                    "VideoDecoder_configureFromMetadata returns 0x%X", lerr);
-                break;
-            }
-            continue;
-        } else if (errStatus != OK) {
-            ALOGE("VideoEditorVideoDecoder_decode ERROR:0x%x(%d)",
-                errStatus,errStatus);
-            lerr = errStatus;
-            goto VIDEOEDITOR_VideoDecode_cleanUP;
-        }
-
-        // The OMXCodec client should expect to receive 0-length buffers
-        // and drop the 0-length buffers.
-        if (pNextBuffer->range_length() == 0) {
-            pNextBuffer->release();
-            continue;
-        }
-
-        // Now we have a good next buffer, release the previous one.
-        if (pDecoderBuffer != NULL) {
-            pDecoderBuffer->release();
-            pDecoderBuffer = NULL;
-        }
-        pDecoderBuffer = pNextBuffer;
-
-        // Record the timestamp of last decoded buffer
-        pDecoderBuffer->meta_data()->findInt64(kKeyTime, &lFrameTime);
-        pDecShellContext->m_lastDecodedCTS = (M4_MediaTime)(lFrameTime/1000);
-        ALOGV("VideoEditorVideoDecoder_decode,decoded frametime = %lf,size = %d",
-            (M4_MediaTime)lFrameTime, pDecoderBuffer->size() );
-
-        /*
-         * We need to save a buffer if bJump == false to a queue. These
-         * buffers have a timestamp >= the target time, *pTime (for instance,
-         * the transition between two videos, or a trimming postion inside
-         * one video), since they are part of the transition clip or the
-         * trimmed video.
-         *
-         * If *pTime does not have the same value as any of the existing
-         * video frames, we would like to get the buffer right before *pTime
-         * and in the transcoding phrase, this video frame will be encoded
-         * as a key frame and becomes the first video frame for the transition or the
-         * trimmed video to be generated. This buffer must also be queued.
-         *
-         */
-        int64_t targetTimeMs =
-                pDecShellContext->m_lastDecodedCTS +
-                pDecShellContext->mFrameIntervalMs +
-                tolerance;
-        if (!bJump || targetTimeMs > *pTime) {
-            lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
-            if (lerr != M4NO_ERROR) {
-                goto VIDEOEDITOR_VideoDecode_cleanUP;
-            }
-        }
-    }
-
-    pDecShellContext->mNbOutputFrames++;
-    if ( 0 > pDecShellContext->mFirstOutputCts ) {
-        pDecShellContext->mFirstOutputCts = *pTime;
-    }
-    pDecShellContext->mLastOutputCts = *pTime;
-
-VIDEOEDITOR_VideoDecode_cleanUP:
-    *pTime = pDecShellContext->m_lastDecodedCTS;
-    if (pDecoderBuffer != NULL) {
-        pDecoderBuffer->release();
-        pDecoderBuffer = NULL;
-    }
-
-    ALOGV("VideoEditorVideoDecoder_decode: end with 0x%x", lerr);
-    return lerr;
-}
-
-static M4OSA_ERR copyBufferToQueue(
-    VideoEditorVideoDecoder_Context* pDecShellContext,
-    MediaBuffer* pDecoderBuffer) {
-
-    M4OSA_ERR lerr = M4NO_ERROR;
-    VIDEOEDITOR_BUFFER_Buffer* tmpDecBuffer;
-
-    // Get a buffer from the queue
-    lerr = VIDEOEDITOR_BUFFER_getBuffer(pDecShellContext->m_pDecBufferPool,
-        VIDEOEDITOR_BUFFER_kEmpty, &tmpDecBuffer);
-    if (lerr == (M4OSA_UInt32)M4ERR_NO_BUFFER_AVAILABLE) {
-        lerr = VIDEOEDITOR_BUFFER_getOldestBuffer(
-            pDecShellContext->m_pDecBufferPool,
-            VIDEOEDITOR_BUFFER_kFilled, &tmpDecBuffer);
-        tmpDecBuffer->state = VIDEOEDITOR_BUFFER_kEmpty;
-        lerr = M4NO_ERROR;
-    }
-
-    if (lerr != M4NO_ERROR) return lerr;
-
-    // Color convert or copy from the given MediaBuffer to our buffer
-    if (pDecShellContext->mI420ColorConverter) {
-        if (pDecShellContext->mI420ColorConverter->convertDecoderOutputToI420(
-            (uint8_t *)pDecoderBuffer->data(),// ?? + pDecoderBuffer->range_offset(),   // decoderBits
-            pDecShellContext->mGivenWidth,  // decoderWidth
-            pDecShellContext->mGivenHeight,  // decoderHeight
-            pDecShellContext->mCropRect,  // decoderRect
-            tmpDecBuffer->pData /* dstBits */) < 0) {
-            ALOGE("convertDecoderOutputToI420 failed");
-            lerr = M4ERR_NOT_IMPLEMENTED;
-        }
-    } else if (pDecShellContext->decOuputColorFormat == OMX_COLOR_FormatYUV420Planar) {
-        int32_t width = pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
-        int32_t height = pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
-        int32_t yPlaneSize = width * height;
-        int32_t uvPlaneSize = width * height / 4;
-        int32_t offsetSrc = 0;
-
-        if (( width == pDecShellContext->mGivenWidth )  &&
-            ( height == pDecShellContext->mGivenHeight ))
-        {
-            M4OSA_MemAddr8 pTmpBuff = (M4OSA_MemAddr8)pDecoderBuffer->data() + pDecoderBuffer->range_offset();
-
-            memcpy((void *)tmpDecBuffer->pData, (void *)pTmpBuff, yPlaneSize);
-
-            offsetSrc += pDecShellContext->mGivenWidth * pDecShellContext->mGivenHeight;
-            memcpy((void *)((M4OSA_MemAddr8)tmpDecBuffer->pData + yPlaneSize),
-                (void *)(pTmpBuff + offsetSrc), uvPlaneSize);
-
-            offsetSrc += (pDecShellContext->mGivenWidth >> 1) * (pDecShellContext->mGivenHeight >> 1);
-            memcpy((void *)((M4OSA_MemAddr8)tmpDecBuffer->pData + yPlaneSize + uvPlaneSize),
-                (void *)(pTmpBuff + offsetSrc), uvPlaneSize);
-        }
-        else
-        {
-            M4OSA_MemAddr8 pTmpBuff = (M4OSA_MemAddr8)pDecoderBuffer->data() + pDecoderBuffer->range_offset();
-            M4OSA_MemAddr8 pTmpBuffDst = (M4OSA_MemAddr8)tmpDecBuffer->pData;
-            int32_t index;
-
-            for ( index = 0; index < height; index++)
-            {
-                memcpy((void *)pTmpBuffDst, (void *)pTmpBuff, width);
-                pTmpBuffDst += width;
-                pTmpBuff += pDecShellContext->mGivenWidth;
-            }
-
-            pTmpBuff += (pDecShellContext->mGivenWidth * ( pDecShellContext->mGivenHeight - height));
-            for ( index = 0; index < height >> 1; index++)
-            {
-                memcpy((void *)pTmpBuffDst, (void *)pTmpBuff, width >> 1);
-                pTmpBuffDst += width >> 1;
-                pTmpBuff += pDecShellContext->mGivenWidth >> 1;
-            }
-
-            pTmpBuff += ((pDecShellContext->mGivenWidth * (pDecShellContext->mGivenHeight - height)) / 4);
-            for ( index = 0; index < height >> 1; index++)
-            {
-                memcpy((void *)pTmpBuffDst, (void *)pTmpBuff, width >> 1);
-                pTmpBuffDst += width >> 1;
-                pTmpBuff += pDecShellContext->mGivenWidth >> 1;
-            }
-        }
-    } else {
-        ALOGE("VideoDecoder_decode: unexpected color format 0x%X",
-            pDecShellContext->decOuputColorFormat);
-        lerr = M4ERR_PARAMETER;
-    }
-
-    tmpDecBuffer->buffCTS = pDecShellContext->m_lastDecodedCTS;
-    tmpDecBuffer->state = VIDEOEDITOR_BUFFER_kFilled;
-    tmpDecBuffer->size = pDecoderBuffer->size();
-
-    return lerr;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_render(M4OSA_Context context,
-        M4_MediaTime* pTime, M4VIFI_ImagePlane* pOutputPlane,
-        M4OSA_Bool bForceRender) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoDecoder_Context* pDecShellContext =
-        (VideoEditorVideoDecoder_Context*) context;
-    M4OSA_UInt32 lindex, i;
-    M4OSA_UInt8* p_buf_src, *p_buf_dest;
-    M4VIFI_ImagePlane tmpPlaneIn, tmpPlaneOut;
-    VIDEOEDITOR_BUFFER_Buffer* pTmpVIDEOEDITORBuffer, *pRenderVIDEOEDITORBuffer
-                                                                  = M4OSA_NULL;
-    M4_MediaTime candidateTimeStamp = -1;
-    M4OSA_Bool bFound = M4OSA_FALSE;
-
-    ALOGV("VideoEditorVideoDecoder_render begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != context, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pTime, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutputPlane, M4ERR_PARAMETER);
-
-    // The output buffer is already allocated, just copy the data
-    if ( (*pTime <= pDecShellContext->m_lastRenderCts) &&
-            (M4OSA_FALSE == bForceRender) ) {
-        ALOGV("VIDEOEDITOR_VIDEO_render Frame in the past");
-        err = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
-        goto cleanUp;
-    }
-    ALOGV("VideoDecoder_render: lastRendered time = %lf,requested render time = "
-        "%lf", pDecShellContext->m_lastRenderCts, *pTime);
-
-    /**
-     * Find the buffer appropriate for rendering.  */
-    for (i=0; i < pDecShellContext->m_pDecBufferPool->NB; i++) {
-        pTmpVIDEOEDITORBuffer = &pDecShellContext->m_pDecBufferPool\
-            ->pNXPBuffer[i];
-        if (pTmpVIDEOEDITORBuffer->state == VIDEOEDITOR_BUFFER_kFilled) {
-            /** Free all those buffers older than last rendered frame. */
-            if (pTmpVIDEOEDITORBuffer->buffCTS < pDecShellContext->\
-                    m_lastRenderCts) {
-                pTmpVIDEOEDITORBuffer->state = VIDEOEDITOR_BUFFER_kEmpty;
-            }
-
-            /** Get the buffer with appropriate timestamp  */
-            if ( (pTmpVIDEOEDITORBuffer->buffCTS >= pDecShellContext->\
-                    m_lastRenderCts) &&
-                (pTmpVIDEOEDITORBuffer->buffCTS <= *pTime) &&
-                (pTmpVIDEOEDITORBuffer->buffCTS > candidateTimeStamp)) {
-                bFound = M4OSA_TRUE;
-                pRenderVIDEOEDITORBuffer = pTmpVIDEOEDITORBuffer;
-                candidateTimeStamp = pTmpVIDEOEDITORBuffer->buffCTS;
-                ALOGV("VideoDecoder_render: found a buffer with timestamp = %lf",
-                    candidateTimeStamp);
-            }
-        }
-    }
-    if (M4OSA_FALSE == bFound) {
-        err = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
-        goto cleanUp;
-    }
-
-    ALOGV("VideoEditorVideoDecoder_render 3 ouput %d %d %d %d",
-        pOutputPlane[0].u_width, pOutputPlane[0].u_height,
-        pOutputPlane[0].u_topleft, pOutputPlane[0].u_stride);
-
-    pDecShellContext->m_lastRenderCts = candidateTimeStamp;
-
-    if( M4OSA_NULL != pDecShellContext->m_pFilter ) {
-        // Filtering was requested
-        M4VIFI_ImagePlane tmpPlane[3];
-        // Prepare the output image for conversion
-        tmpPlane[0].u_width   =
-            pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
-        tmpPlane[0].u_height  =
-            pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
-        tmpPlane[0].u_topleft = 0;
-        tmpPlane[0].u_stride  = tmpPlane[0].u_width;
-        tmpPlane[0].pac_data  = (M4VIFI_UInt8*)pRenderVIDEOEDITORBuffer->pData;
-        tmpPlane[1].u_width   = tmpPlane[0].u_width/2;
-        tmpPlane[1].u_height  = tmpPlane[0].u_height/2;
-        tmpPlane[1].u_topleft = 0;
-        tmpPlane[1].u_stride  = tmpPlane[0].u_stride/2;
-        tmpPlane[1].pac_data  = tmpPlane[0].pac_data +
-            (tmpPlane[0].u_stride * tmpPlane[0].u_height);
-        tmpPlane[2].u_width   = tmpPlane[1].u_width;
-        tmpPlane[2].u_height  = tmpPlane[1].u_height;
-        tmpPlane[2].u_topleft = 0;
-        tmpPlane[2].u_stride  = tmpPlane[1].u_stride;
-        tmpPlane[2].pac_data  = tmpPlane[1].pac_data +
-            (tmpPlane[1].u_stride * tmpPlane[1].u_height);
-
-        ALOGV("VideoEditorVideoDecoder_render w = %d H = %d",
-            tmpPlane[0].u_width,tmpPlane[0].u_height);
-        pDecShellContext->m_pFilter(M4OSA_NULL, &tmpPlane[0], pOutputPlane);
-    } else {
-        // Just copy the YUV420P buffer
-        M4OSA_MemAddr8 tempBuffPtr =
-            (M4OSA_MemAddr8)pRenderVIDEOEDITORBuffer->pData;
-        M4OSA_UInt32 tempWidth =
-            pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
-        M4OSA_UInt32 tempHeight =
-            pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
-
-        memcpy((void *) pOutputPlane[0].pac_data, (void *)tempBuffPtr,
-            tempWidth * tempHeight);
-        tempBuffPtr += (tempWidth * tempHeight);
-        memcpy((void *) pOutputPlane[1].pac_data, (void *)tempBuffPtr,
-            (tempWidth/2) * (tempHeight/2));
-        tempBuffPtr += ((tempWidth/2) * (tempHeight/2));
-        memcpy((void *) pOutputPlane[2].pac_data, (void *)tempBuffPtr,
-            (tempWidth/2) * (tempHeight/2));
-    }
-
-    pDecShellContext->mNbRenderedFrames++;
-    if ( 0 > pDecShellContext->mFirstRenderedCts ) {
-        pDecShellContext->mFirstRenderedCts = *pTime;
-    }
-    pDecShellContext->mLastRenderedCts = *pTime;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        *pTime = pDecShellContext->m_lastRenderCts;
-        ALOGV("VideoEditorVideoDecoder_render no error");
-    } else {
-        ALOGV("VideoEditorVideoDecoder_render ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoDecoder_render end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_getInterface(M4DECODER_VideoType decoderType,
-        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
-    M4DECODER_VideoInterface* pDecoderInterface = M4OSA_NULL;
-
-    pDecoderInterface = (M4DECODER_VideoInterface*)M4OSA_32bitAlignedMalloc(
-        sizeof(M4DECODER_VideoInterface), M4DECODER_EXTERNAL,
-        (M4OSA_Char*)"VideoEditorVideoDecoder_getInterface" );
-    if (M4OSA_NULL == pDecoderInterface) {
-        return M4ERR_ALLOC;
-    }
-
-    *pDecoderType = decoderType;
-
-    pDecoderInterface->m_pFctCreate    = VideoEditorVideoDecoder_create;
-    pDecoderInterface->m_pFctDestroy   = VideoEditorVideoDecoder_destroy;
-    pDecoderInterface->m_pFctGetOption = VideoEditorVideoDecoder_getOption;
-    pDecoderInterface->m_pFctSetOption = VideoEditorVideoDecoder_setOption;
-    pDecoderInterface->m_pFctDecode    = VideoEditorVideoDecoder_decode;
-    pDecoderInterface->m_pFctRender    = VideoEditorVideoDecoder_render;
-
-    *pDecInterface = (M4OSA_Context)pDecoderInterface;
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface(M4DECODER_VideoType decoderType,
-        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
-    M4DECODER_VideoInterface* pDecoderInterface = M4OSA_NULL;
-
-    pDecoderInterface = (M4DECODER_VideoInterface*)M4OSA_32bitAlignedMalloc(
-        sizeof(M4DECODER_VideoInterface), M4DECODER_EXTERNAL,
-        (M4OSA_Char*)"VideoEditorVideoDecoder_getInterface" );
-    if (M4OSA_NULL == pDecoderInterface) {
-        return M4ERR_ALLOC;
-    }
-
-    *pDecoderType = decoderType;
-
-    pDecoderInterface->m_pFctCreate    = VideoEditorVideoSoftwareDecoder_create;
-    pDecoderInterface->m_pFctDestroy   = VideoEditorVideoDecoder_destroy;
-    pDecoderInterface->m_pFctGetOption = VideoEditorVideoDecoder_getOption;
-    pDecoderInterface->m_pFctSetOption = VideoEditorVideoDecoder_setOption;
-    pDecoderInterface->m_pFctDecode    = VideoEditorVideoDecoder_decode;
-    pDecoderInterface->m_pFctRender    = VideoEditorVideoDecoder_render;
-
-    *pDecInterface = (M4OSA_Context)pDecoderInterface;
-    return M4NO_ERROR;
-}
-extern "C" {
-
-M4OSA_ERR VideoEditorVideoDecoder_getInterface_MPEG4(
-        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
-    return VideoEditorVideoDecoder_getInterface(M4DECODER_kVideoTypeMPEG4,
-        pDecoderType, pDecInterface);
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_getInterface_H264(
-        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
-    return VideoEditorVideoDecoder_getInterface(M4DECODER_kVideoTypeAVC,
-        pDecoderType, pDecInterface);
-
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_MPEG4(
-        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
-    return VideoEditorVideoDecoder_getSoftwareInterface(M4DECODER_kVideoTypeMPEG4,
-        pDecoderType, pDecInterface);
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_H264(
-        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
-    return VideoEditorVideoDecoder_getSoftwareInterface(M4DECODER_kVideoTypeAVC,
-        pDecoderType, pDecInterface);
-
-}
-
-M4OSA_ERR VideoEditorVideoDecoder_getVideoDecodersAndCapabilities(
-    M4DECODER_VideoDecoders** decoders) {
-    return queryVideoDecoderCapabilities(decoders);
-}
-
-}  // extern "C"
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp
deleted file mode 100755
index ca7db68..0000000
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp
+++ /dev/null
@@ -1,1304 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
-*************************************************************************
-* @file   VideoEditorVideoEncoder.cpp
-* @brief  StageFright shell video encoder
-*************************************************************************
-*/
-#define LOG_NDEBUG 1
-#define LOG_TAG "VIDEOEDITOR_VIDEOENCODER"
-
-/*******************
- *     HEADERS     *
- *******************/
-#include "M4OSA_Debug.h"
-#include "M4SYS_AccessUnit.h"
-#include "VideoEditorVideoEncoder.h"
-#include "VideoEditorUtils.h"
-#include "MediaBufferPuller.h"
-#include <I420ColorConverter.h>
-
-#include <unistd.h>
-#include "utils/Log.h"
-#include "utils/Vector.h"
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
-#include <media/MediaProfiles.h>
-#include "OMX_Video.h"
-
-/********************
- *   DEFINITIONS    *
- ********************/
-
-// Force using hardware encoder
-#define VIDEOEDITOR_FORCECODEC kHardwareCodecsOnly
-
-#if !defined(VIDEOEDITOR_FORCECODEC)
-    #error "Cannot force DSI retrieval if codec type is not fixed"
-#endif
-
-/********************
- *   SOURCE CLASS   *
- ********************/
-
-namespace android {
-
-struct VideoEditorVideoEncoderSource : public MediaSource {
-    public:
-        static sp<VideoEditorVideoEncoderSource> Create(
-            const sp<MetaData> &format);
-        virtual status_t start(MetaData *params = NULL);
-        virtual status_t stop();
-        virtual sp<MetaData> getFormat();
-        virtual status_t read(MediaBuffer **buffer,
-            const ReadOptions *options = NULL);
-        virtual int32_t storeBuffer(MediaBuffer *buffer);
-        virtual int32_t getNumberOfBuffersInQueue();
-
-    protected:
-        virtual ~VideoEditorVideoEncoderSource();
-
-    private:
-        struct MediaBufferChain {
-            MediaBuffer* buffer;
-            MediaBufferChain* nextLink;
-        };
-        enum State {
-            CREATED,
-            STARTED,
-            ERROR
-        };
-        VideoEditorVideoEncoderSource(const sp<MetaData> &format);
-
-        // Don't call me
-        VideoEditorVideoEncoderSource(const VideoEditorVideoEncoderSource &);
-        VideoEditorVideoEncoderSource &operator=(
-                const VideoEditorVideoEncoderSource &);
-
-        MediaBufferChain* mFirstBufferLink;
-        MediaBufferChain* mLastBufferLink;
-        int32_t           mNbBuffer;
-        bool              mIsEOS;
-        State             mState;
-        sp<MetaData>      mEncFormat;
-        Mutex             mLock;
-        Condition         mBufferCond;
-};
-
-sp<VideoEditorVideoEncoderSource> VideoEditorVideoEncoderSource::Create(
-    const sp<MetaData> &format) {
-
-    sp<VideoEditorVideoEncoderSource> aSource =
-        new VideoEditorVideoEncoderSource(format);
-    return aSource;
-}
-
-VideoEditorVideoEncoderSource::VideoEditorVideoEncoderSource(
-    const sp<MetaData> &format):
-        mFirstBufferLink(NULL),
-        mLastBufferLink(NULL),
-        mNbBuffer(0),
-        mIsEOS(false),
-        mState(CREATED),
-        mEncFormat(format) {
-    ALOGV("VideoEditorVideoEncoderSource::VideoEditorVideoEncoderSource");
-}
-
-VideoEditorVideoEncoderSource::~VideoEditorVideoEncoderSource() {
-
-    // Safety clean up
-    if( STARTED == mState ) {
-        stop();
-    }
-}
-
-status_t VideoEditorVideoEncoderSource::start(MetaData *meta) {
-    status_t err = OK;
-
-    ALOGV("VideoEditorVideoEncoderSource::start() begin");
-
-    if( CREATED != mState ) {
-        ALOGV("VideoEditorVideoEncoderSource::start: invalid state %d", mState);
-        return UNKNOWN_ERROR;
-    }
-    mState = STARTED;
-
-    ALOGV("VideoEditorVideoEncoderSource::start() END (0x%x)", err);
-    return err;
-}
-
-status_t VideoEditorVideoEncoderSource::stop() {
-    status_t err = OK;
-
-    ALOGV("VideoEditorVideoEncoderSource::stop() begin");
-
-    if( STARTED != mState ) {
-        ALOGV("VideoEditorVideoEncoderSource::stop: invalid state %d", mState);
-        return UNKNOWN_ERROR;
-    }
-
-    // Release the buffer chain
-    int32_t i = 0;
-    MediaBufferChain* tmpLink = NULL;
-    while( mFirstBufferLink ) {
-        i++;
-        tmpLink = mFirstBufferLink;
-        mFirstBufferLink = mFirstBufferLink->nextLink;
-        delete tmpLink;
-    }
-    ALOGV("VideoEditorVideoEncoderSource::stop : %d buffer remained", i);
-    mFirstBufferLink = NULL;
-    mLastBufferLink = NULL;
-
-    mState = CREATED;
-
-    ALOGV("VideoEditorVideoEncoderSource::stop() END (0x%x)", err);
-    return err;
-}
-
-sp<MetaData> VideoEditorVideoEncoderSource::getFormat() {
-
-    ALOGV("VideoEditorVideoEncoderSource::getFormat");
-    return mEncFormat;
-}
-
-status_t VideoEditorVideoEncoderSource::read(MediaBuffer **buffer,
-        const ReadOptions *options) {
-    Mutex::Autolock autolock(mLock);
-    MediaSource::ReadOptions readOptions;
-    status_t err = OK;
-    MediaBufferChain* tmpLink = NULL;
-
-    ALOGV("VideoEditorVideoEncoderSource::read() begin");
-
-    if ( STARTED != mState ) {
-        ALOGV("VideoEditorVideoEncoderSource::read: invalid state %d", mState);
-        return UNKNOWN_ERROR;
-    }
-
-    while (mFirstBufferLink == NULL && !mIsEOS) {
-        mBufferCond.wait(mLock);
-    }
-
-    // End of stream?
-    if (mFirstBufferLink == NULL) {
-        *buffer = NULL;
-        ALOGV("VideoEditorVideoEncoderSource::read : EOS");
-        return ERROR_END_OF_STREAM;
-    }
-
-    // Get a buffer from the chain
-    *buffer = mFirstBufferLink->buffer;
-    tmpLink = mFirstBufferLink;
-    mFirstBufferLink = mFirstBufferLink->nextLink;
-
-    if ( NULL == mFirstBufferLink ) {
-        mLastBufferLink = NULL;
-    }
-    delete tmpLink;
-    mNbBuffer--;
-
-    ALOGV("VideoEditorVideoEncoderSource::read() END (0x%x)", err);
-    return err;
-}
-
-int32_t VideoEditorVideoEncoderSource::storeBuffer(MediaBuffer *buffer) {
-    Mutex::Autolock autolock(mLock);
-    status_t err = OK;
-
-    ALOGV("VideoEditorVideoEncoderSource::storeBuffer() begin");
-
-    if( NULL == buffer ) {
-        ALOGV("VideoEditorVideoEncoderSource::storeBuffer : reached EOS");
-        mIsEOS = true;
-    } else {
-        MediaBufferChain* newLink = new MediaBufferChain;
-        newLink->buffer = buffer;
-        newLink->nextLink = NULL;
-        if( NULL != mLastBufferLink ) {
-            mLastBufferLink->nextLink = newLink;
-        } else {
-            mFirstBufferLink = newLink;
-        }
-        mLastBufferLink = newLink;
-        mNbBuffer++;
-    }
-    mBufferCond.signal();
-    ALOGV("VideoEditorVideoEncoderSource::storeBuffer() end");
-    return mNbBuffer;
-}
-
-int32_t VideoEditorVideoEncoderSource::getNumberOfBuffersInQueue() {
-    Mutex::Autolock autolock(mLock);
-    return mNbBuffer;
-}
-
-/**
- ******************************************************************************
- * structure VideoEditorVideoEncoder_Context
- * @brief    This structure defines the context of the StageFright video encoder
- *           shell
- ******************************************************************************
-*/
-typedef enum {
-    CREATED   = 0x1,
-    OPENED    = 0x2,
-    STARTED   = 0x4,
-    BUFFERING = 0x8,
-    READING   = 0x10
-} VideoEditorVideoEncoder_State;
-
-typedef struct {
-    VideoEditorVideoEncoder_State     mState;
-    M4ENCODER_Format                  mFormat;
-    M4WRITER_DataInterface*           mWriterDataInterface;
-    M4VPP_apply_fct*                  mPreProcFunction;
-    M4VPP_Context                     mPreProcContext;
-    M4SYS_AccessUnit*                 mAccessUnit;
-    M4ENCODER_Params*                 mCodecParams;
-    M4ENCODER_Header                  mHeader;
-    H264MCS_ProcessEncodedNALU_fct*   mH264NALUPostProcessFct;
-    M4OSA_Context                     mH264NALUPostProcessCtx;
-    M4OSA_UInt32                      mLastCTS;
-    sp<VideoEditorVideoEncoderSource> mEncoderSource;
-    OMXClient                         mClient;
-    sp<MediaSource>                   mEncoder;
-    OMX_COLOR_FORMATTYPE              mEncoderColorFormat;
-    MediaBufferPuller*                mPuller;
-    I420ColorConverter*               mI420ColorConverter;
-
-    uint32_t                          mNbInputFrames;
-    double                            mFirstInputCts;
-    double                            mLastInputCts;
-    uint32_t                          mNbOutputFrames;
-    int64_t                           mFirstOutputCts;
-    int64_t                           mLastOutputCts;
-
-    MediaProfiles *mVideoEditorProfile;
-    int32_t mMaxPrefetchFrames;
-} VideoEditorVideoEncoder_Context;
-
-/********************
- *      TOOLS       *
- ********************/
-
-M4OSA_ERR VideoEditorVideoEncoder_getDSI(M4ENCODER_Context pContext,
-        sp<MetaData> metaData) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context*  pEncoderContext = M4OSA_NULL;
-    status_t result = OK;
-    int32_t nbBuffer = 0;
-    int32_t stride = 0;
-    int32_t height = 0;
-    int32_t framerate = 0;
-    int32_t isCodecConfig = 0;
-    size_t size = 0;
-    uint32_t codecFlags = 0;
-    MediaBuffer* inputBuffer = NULL;
-    MediaBuffer* outputBuffer = NULL;
-    sp<VideoEditorVideoEncoderSource> encoderSource = NULL;
-    sp<MediaSource> encoder = NULL;;
-    OMXClient client;
-
-    ALOGV("VideoEditorVideoEncoder_getDSI begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,       M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != metaData.get(), M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
-
-    // Create the encoder source
-    encoderSource = VideoEditorVideoEncoderSource::Create(metaData);
-    VIDEOEDITOR_CHECK(NULL != encoderSource.get(), M4ERR_STATE);
-
-    // Connect to the OMX client
-    result = client.connect();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    // Create the OMX codec
-    // VIDEOEDITOR_FORCECODEC MUST be defined here
-    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
-    encoder = OMXCodec::Create(client.interface(), metaData, true,
-        encoderSource, NULL, codecFlags);
-    VIDEOEDITOR_CHECK(NULL != encoder.get(), M4ERR_STATE);
-
-    /**
-     * Send fake frames and retrieve the DSI
-     */
-    // Send a fake frame to the source
-    metaData->findInt32(kKeyStride,     &stride);
-    metaData->findInt32(kKeyHeight,     &height);
-    metaData->findInt32(kKeySampleRate, &framerate);
-    size = (size_t)(stride*height*3)/2;
-    inputBuffer = new MediaBuffer(size);
-    inputBuffer->meta_data()->setInt64(kKeyTime, 0);
-    nbBuffer = encoderSource->storeBuffer(inputBuffer);
-    encoderSource->storeBuffer(NULL); // Signal EOS
-
-    // Call read once to get the DSI
-    result = encoder->start();;
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-    result = encoder->read(&outputBuffer, NULL);
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-    VIDEOEDITOR_CHECK(outputBuffer->meta_data()->findInt32(
-        kKeyIsCodecConfig, &isCodecConfig) && isCodecConfig, M4ERR_STATE);
-
-    VIDEOEDITOR_CHECK(M4OSA_NULL == pEncoderContext->mHeader.pBuf, M4ERR_STATE);
-    if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
-        // For H264, format the DSI
-        result = buildAVCCodecSpecificData(
-            (uint8_t**)(&(pEncoderContext->mHeader.pBuf)),
-            (size_t*)(&(pEncoderContext->mHeader.Size)),
-            (const uint8_t*)outputBuffer->data() + outputBuffer->range_offset(),
-            outputBuffer->range_length(), encoder->getFormat().get());
-        outputBuffer->release();
-        VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-    } else {
-        // For MPEG4, just copy the DSI
-        pEncoderContext->mHeader.Size =
-            (M4OSA_UInt32)outputBuffer->range_length();
-        SAFE_MALLOC(pEncoderContext->mHeader.pBuf, M4OSA_Int8,
-            pEncoderContext->mHeader.Size, "Encoder header");
-        memcpy((void *)pEncoderContext->mHeader.pBuf,
-            (void *)((M4OSA_MemAddr8)(outputBuffer->data())+outputBuffer->range_offset()),
-            pEncoderContext->mHeader.Size);
-        outputBuffer->release();
-    }
-
-    result = encoder->stop();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-cleanUp:
-    // Destroy the graph
-    if ( encoder != NULL ) { encoder.clear(); }
-    client.disconnect();
-    if ( encoderSource != NULL ) { encoderSource.clear(); }
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_getDSI no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_getDSI ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_getDSI end");
-    return err;
-}
-/********************
- * ENGINE INTERFACE *
- ********************/
-
-M4OSA_ERR VideoEditorVideoEncoder_cleanup(M4ENCODER_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorVideoEncoder_cleanup begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
-
-    // Release memory
-    SAFE_FREE(pEncoderContext->mHeader.pBuf);
-    SAFE_FREE(pEncoderContext);
-    pContext = M4OSA_NULL;
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_cleanup no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_cleanup ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_cleanup end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_init(M4ENCODER_Format format,
-        M4ENCODER_Context* pContext,
-        M4WRITER_DataInterface* pWriterDataInterface,
-        M4VPP_apply_fct* pVPPfct, M4VPP_Context pVPPctxt,
-        M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData) {
-
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    int encoderInput = OMX_COLOR_FormatYUV420Planar;
-
-    ALOGV("VideoEditorVideoEncoder_init begin: format  %d", format);
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pWriterDataInterface, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pVPPfct, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pVPPctxt, M4ERR_PARAMETER);
-
-    // Context allocation & initialization
-    SAFE_MALLOC(pEncoderContext, VideoEditorVideoEncoder_Context, 1,
-        "VideoEditorVideoEncoder");
-    pEncoderContext->mState = CREATED;
-    pEncoderContext->mFormat = format;
-    pEncoderContext->mWriterDataInterface = pWriterDataInterface;
-    pEncoderContext->mPreProcFunction = pVPPfct;
-    pEncoderContext->mPreProcContext = pVPPctxt;
-    pEncoderContext->mPuller = NULL;
-
-    // Get color converter and determine encoder input format
-    pEncoderContext->mI420ColorConverter = new I420ColorConverter;
-    if (pEncoderContext->mI420ColorConverter->isLoaded()) {
-        encoderInput = pEncoderContext->mI420ColorConverter->getEncoderInputFormat();
-    }
-    if (encoderInput == OMX_COLOR_FormatYUV420Planar) {
-        delete pEncoderContext->mI420ColorConverter;
-        pEncoderContext->mI420ColorConverter = NULL;
-    }
-    pEncoderContext->mEncoderColorFormat = (OMX_COLOR_FORMATTYPE)encoderInput;
-    ALOGI("encoder input format = 0x%X\n", encoderInput);
-
-    *pContext = pEncoderContext;
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_init no error");
-    } else {
-        VideoEditorVideoEncoder_cleanup(pEncoderContext);
-        *pContext = M4OSA_NULL;
-        ALOGV("VideoEditorVideoEncoder_init ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_init end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_init_H263(M4ENCODER_Context* pContext,
-        M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
-        M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
-        {
-
-    return VideoEditorVideoEncoder_init(M4ENCODER_kH263, pContext,
-        pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
-}
-
-
-M4OSA_ERR VideoEditorVideoEncoder_init_MPEG4(M4ENCODER_Context* pContext,
-        M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
-        M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
-        {
-
-    return VideoEditorVideoEncoder_init(M4ENCODER_kMPEG4, pContext,
-        pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
-}
-
-
-M4OSA_ERR VideoEditorVideoEncoder_init_H264(M4ENCODER_Context* pContext,
-        M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
-        M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
-        {
-
-    return VideoEditorVideoEncoder_init(M4ENCODER_kH264, pContext,
-        pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_close(M4ENCODER_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorVideoEncoder_close begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    VIDEOEDITOR_CHECK(OPENED == pEncoderContext->mState, M4ERR_STATE);
-
-    // Release memory
-    SAFE_FREE(pEncoderContext->mCodecParams);
-
-    // Destroy the graph
-    pEncoderContext->mEncoder.clear();
-    pEncoderContext->mClient.disconnect();
-    pEncoderContext->mEncoderSource.clear();
-
-    delete pEncoderContext->mPuller;
-    pEncoderContext->mPuller = NULL;
-
-    delete pEncoderContext->mI420ColorConverter;
-    pEncoderContext->mI420ColorConverter = NULL;
-
-    // Set the new state
-    pEncoderContext->mState = CREATED;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_close no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_close ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_close end");
-    return err;
-}
-
-
-M4OSA_ERR VideoEditorVideoEncoder_open(M4ENCODER_Context pContext,
-        M4SYS_AccessUnit* pAU, M4OSA_Void* pParams) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    M4ENCODER_Params* pCodecParams = M4OSA_NULL;
-    status_t result = OK;
-    sp<MetaData> encoderMetadata = NULL;
-    const char* mime = NULL;
-    int32_t iProfile = 0;
-    int32_t iLevel = 0;
-
-    int32_t iFrameRate = 0;
-    uint32_t codecFlags = 0;
-
-    ALOGV(">>> VideoEditorVideoEncoder_open begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pAU,      M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pParams,  M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    pCodecParams = (M4ENCODER_Params*)pParams;
-    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
-
-    // Context initialization
-    pEncoderContext->mAccessUnit = pAU;
-    pEncoderContext->mVideoEditorProfile = MediaProfiles::getInstance();
-    pEncoderContext->mMaxPrefetchFrames =
-        pEncoderContext->mVideoEditorProfile->getVideoEditorCapParamByName(
-        "maxPrefetchYUVFrames");
-
-    // Allocate & initialize the encoding parameters
-    SAFE_MALLOC(pEncoderContext->mCodecParams, M4ENCODER_Params, 1,
-        "VideoEditorVideoEncoder");
-
-
-    pEncoderContext->mCodecParams->InputFormat = pCodecParams->InputFormat;
-    pEncoderContext->mCodecParams->InputFrameWidth =
-        pCodecParams->InputFrameWidth;
-    pEncoderContext->mCodecParams->InputFrameHeight =
-        pCodecParams->InputFrameHeight;
-    pEncoderContext->mCodecParams->FrameWidth = pCodecParams->FrameWidth;
-    pEncoderContext->mCodecParams->FrameHeight = pCodecParams->FrameHeight;
-    pEncoderContext->mCodecParams->Bitrate = pCodecParams->Bitrate;
-    pEncoderContext->mCodecParams->FrameRate = pCodecParams->FrameRate;
-    pEncoderContext->mCodecParams->Format = pCodecParams->Format;
-    pEncoderContext->mCodecParams->videoProfile = pCodecParams->videoProfile;
-    pEncoderContext->mCodecParams->videoLevel= pCodecParams->videoLevel;
-
-    // Check output format consistency and resolution
-    VIDEOEDITOR_CHECK(
-        pEncoderContext->mCodecParams->Format == pEncoderContext->mFormat,
-        M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(0 == pEncoderContext->mCodecParams->FrameWidth  % 16,
-        M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(0 == pEncoderContext->mCodecParams->FrameHeight % 16,
-        M4ERR_PARAMETER);
-
-    /**
-     * StageFright graph building
-     */
-
-    // Create the meta data for the encoder
-    encoderMetadata = new MetaData;
-    switch( pEncoderContext->mCodecParams->Format ) {
-        case M4ENCODER_kH263:
-            mime     = MEDIA_MIMETYPE_VIDEO_H263;
-            break;
-        case M4ENCODER_kMPEG4:
-            mime     = MEDIA_MIMETYPE_VIDEO_MPEG4;
-            break;
-        case M4ENCODER_kH264:
-            mime     = MEDIA_MIMETYPE_VIDEO_AVC;
-            break;
-        default:
-            VIDEOEDITOR_CHECK(!"VideoEncoder_open : incorrect input format",
-                M4ERR_PARAMETER);
-            break;
-    }
-    iProfile = pEncoderContext->mCodecParams->videoProfile;
-    iLevel = pEncoderContext->mCodecParams->videoLevel;
-    ALOGV("Encoder mime %s profile %d, level %d",
-        mime,iProfile, iLevel);
-    ALOGV("Encoder w %d, h %d, bitrate %d, fps %d",
-        pEncoderContext->mCodecParams->FrameWidth,
-        pEncoderContext->mCodecParams->FrameHeight,
-        pEncoderContext->mCodecParams->Bitrate,
-        pEncoderContext->mCodecParams->FrameRate);
-    CHECK(iProfile != 0x7fffffff);
-    CHECK(iLevel != 0x7fffffff);
-
-    encoderMetadata->setCString(kKeyMIMEType, mime);
-    encoderMetadata->setInt32(kKeyVideoProfile, iProfile);
-    //FIXME:
-    // Temp: Do not set the level for Mpeg4 / H.263 Enc
-    // as OMX.Nvidia.mp4.encoder and OMX.Nvidia.h263.encoder
-    // return 0x80001019
-    if (pEncoderContext->mCodecParams->Format == M4ENCODER_kH264) {
-        encoderMetadata->setInt32(kKeyVideoLevel, iLevel);
-    }
-    encoderMetadata->setInt32(kKeyWidth,
-        (int32_t)pEncoderContext->mCodecParams->FrameWidth);
-    encoderMetadata->setInt32(kKeyStride,
-        (int32_t)pEncoderContext->mCodecParams->FrameWidth);
-    encoderMetadata->setInt32(kKeyHeight,
-        (int32_t)pEncoderContext->mCodecParams->FrameHeight);
-    encoderMetadata->setInt32(kKeySliceHeight,
-        (int32_t)pEncoderContext->mCodecParams->FrameHeight);
-
-    switch( pEncoderContext->mCodecParams->FrameRate ) {
-        case M4ENCODER_k5_FPS:    iFrameRate = 5;  break;
-        case M4ENCODER_k7_5_FPS:  iFrameRate = 8;  break;
-        case M4ENCODER_k10_FPS:   iFrameRate = 10; break;
-        case M4ENCODER_k12_5_FPS: iFrameRate = 13; break;
-        case M4ENCODER_k15_FPS:   iFrameRate = 15; break;
-        case M4ENCODER_k20_FPS:   iFrameRate = 20; break;
-        case M4ENCODER_k25_FPS:   iFrameRate = 25; break;
-        case M4ENCODER_k30_FPS:   iFrameRate = 30; break;
-        case M4ENCODER_kVARIABLE_FPS:
-            iFrameRate = 30;
-            ALOGI("Frame rate set to M4ENCODER_kVARIABLE_FPS: set to 30");
-          break;
-        case M4ENCODER_kUSE_TIMESCALE:
-            iFrameRate = 30;
-            ALOGI("Frame rate set to M4ENCODER_kUSE_TIMESCALE:  set to 30");
-            break;
-
-        default:
-            VIDEOEDITOR_CHECK(!"VideoEncoder_open:incorrect framerate",
-                M4ERR_STATE);
-            break;
-    }
-    encoderMetadata->setInt32(kKeyFrameRate, iFrameRate);
-    encoderMetadata->setInt32(kKeyBitRate,
-        (int32_t)pEncoderContext->mCodecParams->Bitrate);
-    encoderMetadata->setInt32(kKeyIFramesInterval, 1);
-
-    encoderMetadata->setInt32(kKeyColorFormat,
-        pEncoderContext->mEncoderColorFormat);
-
-    if (pEncoderContext->mCodecParams->Format != M4ENCODER_kH263) {
-        // Get the encoder DSI
-        err = VideoEditorVideoEncoder_getDSI(pEncoderContext, encoderMetadata);
-        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    }
-
-    // Create the encoder source
-    pEncoderContext->mEncoderSource = VideoEditorVideoEncoderSource::Create(
-        encoderMetadata);
-    VIDEOEDITOR_CHECK(
-        NULL != pEncoderContext->mEncoderSource.get(), M4ERR_STATE);
-
-    // Connect to the OMX client
-    result = pEncoderContext->mClient.connect();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    // Create the OMX codec
-#ifdef VIDEOEDITOR_FORCECODEC
-    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
-#endif /* VIDEOEDITOR_FORCECODEC */
-    pEncoderContext->mEncoder = OMXCodec::Create(
-        pEncoderContext->mClient.interface(), encoderMetadata, true,
-        pEncoderContext->mEncoderSource, NULL, codecFlags);
-    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoder.get(), M4ERR_STATE);
-    ALOGV("VideoEditorVideoEncoder_open : DONE");
-    pEncoderContext->mPuller = new MediaBufferPuller(
-        pEncoderContext->mEncoder);
-
-    // Set the new state
-    pEncoderContext->mState = OPENED;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_open no error");
-    } else {
-        VideoEditorVideoEncoder_close(pEncoderContext);
-        ALOGV("VideoEditorVideoEncoder_open ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_open end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_processInputBuffer(
-        M4ENCODER_Context pContext, M4OSA_Double Cts,
-        M4OSA_Bool bReachedEOS) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    M4VIFI_ImagePlane pOutPlane[3];
-    MediaBuffer* buffer = NULL;
-    int32_t nbBuffer = 0;
-
-    ALOGV("VideoEditorVideoEncoder_processInputBuffer begin: cts  %f", Cts);
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    pOutPlane[0].pac_data = M4OSA_NULL;
-    pOutPlane[1].pac_data = M4OSA_NULL;
-    pOutPlane[2].pac_data = M4OSA_NULL;
-
-    if ( M4OSA_FALSE == bReachedEOS ) {
-        M4OSA_UInt32 sizeY = pEncoderContext->mCodecParams->FrameWidth *
-            pEncoderContext->mCodecParams->FrameHeight;
-        M4OSA_UInt32 sizeU = sizeY >> 2;
-        M4OSA_UInt32 size  = sizeY + 2*sizeU;
-        M4OSA_UInt8* pData = M4OSA_NULL;
-        buffer = new MediaBuffer((size_t)size);
-        pData = (M4OSA_UInt8*)buffer->data() + buffer->range_offset();
-
-        // Prepare the output image for pre-processing
-        pOutPlane[0].u_width   = pEncoderContext->mCodecParams->FrameWidth;
-        pOutPlane[0].u_height  = pEncoderContext->mCodecParams->FrameHeight;
-        pOutPlane[0].u_topleft = 0;
-        pOutPlane[0].u_stride  = pOutPlane[0].u_width;
-        pOutPlane[1].u_width   = pOutPlane[0].u_width/2;
-        pOutPlane[1].u_height  = pOutPlane[0].u_height/2;
-        pOutPlane[1].u_topleft = 0;
-        pOutPlane[1].u_stride  = pOutPlane[0].u_stride/2;
-        pOutPlane[2].u_width   = pOutPlane[1].u_width;
-        pOutPlane[2].u_height  = pOutPlane[1].u_height;
-        pOutPlane[2].u_topleft = 0;
-        pOutPlane[2].u_stride  = pOutPlane[1].u_stride;
-
-        pOutPlane[0].pac_data = pData;
-        pOutPlane[1].pac_data = pData + sizeY;
-        pOutPlane[2].pac_data = pData + sizeY + sizeU;
-
-        // Apply pre-processing
-        err = pEncoderContext->mPreProcFunction(
-            pEncoderContext->mPreProcContext, M4OSA_NULL, pOutPlane);
-        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-        // Convert MediaBuffer to the encoder input format if necessary
-        if (pEncoderContext->mI420ColorConverter) {
-            I420ColorConverter* converter = pEncoderContext->mI420ColorConverter;
-            int actualWidth = pEncoderContext->mCodecParams->FrameWidth;
-            int actualHeight = pEncoderContext->mCodecParams->FrameHeight;
-
-            int encoderWidth, encoderHeight;
-            ARect encoderRect;
-            int encoderBufferSize;
-
-            if (converter->getEncoderInputBufferInfo(
-                actualWidth, actualHeight,
-                &encoderWidth, &encoderHeight,
-                &encoderRect, &encoderBufferSize) == 0) {
-
-                MediaBuffer* newBuffer = new MediaBuffer(encoderBufferSize);
-
-                if (converter->convertI420ToEncoderInput(
-                    pData,  // srcBits
-                    actualWidth, actualHeight,
-                    encoderWidth, encoderHeight,
-                    encoderRect,
-                    (uint8_t*)newBuffer->data() + newBuffer->range_offset()) < 0) {
-                    ALOGE("convertI420ToEncoderInput failed");
-                }
-
-                // switch to new buffer
-                buffer->release();
-                buffer = newBuffer;
-            }
-        }
-
-        // Set the metadata
-        buffer->meta_data()->setInt64(kKeyTime, (int64_t)(Cts*1000));
-    }
-
-    // Push the buffer to the source, a NULL buffer, notifies the source of EOS
-    nbBuffer = pEncoderContext->mEncoderSource->storeBuffer(buffer);
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_processInputBuffer error 0x%X", err);
-    } else {
-        if( NULL != buffer ) {
-            buffer->release();
-        }
-        ALOGV("VideoEditorVideoEncoder_processInputBuffer ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_processInputBuffer end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_processOutputBuffer(
-        M4ENCODER_Context pContext, MediaBuffer* buffer) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    M4OSA_UInt32 Cts = 0;
-    int32_t i32Tmp = 0;
-    int64_t i64Tmp = 0;
-    status_t result = OK;
-
-    ALOGV("VideoEditorVideoEncoder_processOutputBuffer begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer,   M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-
-    // Process the returned AU
-    if ( 0 == buffer->range_length() ) {
-        // Encoder has no data yet, nothing unusual
-        ALOGV("VideoEditorVideoEncoder_processOutputBuffer : buffer is empty");
-        goto cleanUp;
-    }
-    VIDEOEDITOR_CHECK(0 == (((intptr_t)buffer->data())%4), M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(buffer->meta_data().get(), M4ERR_PARAMETER);
-    if ( buffer->meta_data()->findInt32(kKeyIsCodecConfig, &i32Tmp) && i32Tmp ){
-        {   // Display the DSI
-            ALOGV("VideoEditorVideoEncoder_processOutputBuffer DSI %d",
-                buffer->range_length());
-            uint8_t* tmp = (uint8_t*)(buffer->data());
-            for( uint32_t i=0; i<buffer->range_length(); i++ ) {
-                ALOGV("DSI [%d] %.2X", i, tmp[i]);
-            }
-        }
-    } else {
-        // Check the CTS
-        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt64(kKeyTime, &i64Tmp),
-            M4ERR_STATE);
-
-        pEncoderContext->mNbOutputFrames++;
-        if ( 0 > pEncoderContext->mFirstOutputCts ) {
-            pEncoderContext->mFirstOutputCts = i64Tmp;
-        }
-        pEncoderContext->mLastOutputCts = i64Tmp;
-
-        Cts = (M4OSA_Int32)(i64Tmp/1000);
-        ALOGV("[TS_CHECK] VI/ENC WRITE frame %d @ %lld -> %d (last %d)",
-            pEncoderContext->mNbOutputFrames, i64Tmp, Cts,
-            pEncoderContext->mLastCTS);
-        if ( Cts < pEncoderContext->mLastCTS ) {
-            ALOGV("VideoEncoder_processOutputBuffer WARNING : Cts is going "
-            "backwards %d < %d", Cts, pEncoderContext->mLastCTS);
-            goto cleanUp;
-        }
-        ALOGV("VideoEditorVideoEncoder_processOutputBuffer : %d %d",
-            Cts, pEncoderContext->mLastCTS);
-
-        // Retrieve the AU container
-        err = pEncoderContext->mWriterDataInterface->pStartAU(
-            pEncoderContext->mWriterDataInterface->pWriterContext,
-            pEncoderContext->mAccessUnit->stream->streamID,
-            pEncoderContext->mAccessUnit);
-        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-        // Format the AU
-        VIDEOEDITOR_CHECK(
-            buffer->range_length() <= pEncoderContext->mAccessUnit->size,
-            M4ERR_PARAMETER);
-        // Remove H264 AU start code
-        if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
-            if (!memcmp((const uint8_t *)buffer->data() + \
-                    buffer->range_offset(), "\x00\x00\x00\x01", 4) ) {
-                buffer->set_range(buffer->range_offset() + 4,
-                    buffer->range_length() - 4);
-            }
-        }
-
-        if ( (M4ENCODER_kH264 == pEncoderContext->mFormat) &&
-            (M4OSA_NULL != pEncoderContext->mH264NALUPostProcessFct) ) {
-        // H264 trimming case, NALU post processing is needed
-        M4OSA_Int32 outputSize = pEncoderContext->mAccessUnit->size;
-        err = pEncoderContext->mH264NALUPostProcessFct(
-            pEncoderContext->mH264NALUPostProcessCtx,
-            (M4OSA_UInt8*)buffer->data()+buffer->range_offset(),
-            buffer->range_length(),
-            (M4OSA_UInt8*)pEncoderContext->mAccessUnit->dataAddress,
-            &outputSize);
-        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-        pEncoderContext->mAccessUnit->size = (M4OSA_UInt32)outputSize;
-        } else {
-            // The AU can just be copied
-            memcpy((void *)pEncoderContext->mAccessUnit->\
-                dataAddress, (void *)((M4OSA_MemAddr8)(buffer->data())+buffer->\
-                range_offset()), buffer->range_length());
-            pEncoderContext->mAccessUnit->size =
-                (M4OSA_UInt32)buffer->range_length();
-        }
-
-        if ( buffer->meta_data()->findInt32(kKeyIsSyncFrame,&i32Tmp) && i32Tmp){
-            pEncoderContext->mAccessUnit->attribute = AU_RAP;
-        } else {
-            pEncoderContext->mAccessUnit->attribute = AU_P_Frame;
-        }
-        pEncoderContext->mLastCTS = Cts;
-        pEncoderContext->mAccessUnit->CTS = Cts;
-        pEncoderContext->mAccessUnit->DTS = Cts;
-
-        ALOGV("VideoEditorVideoEncoder_processOutputBuffer: AU @ 0x%X 0x%X %d %d",
-            pEncoderContext->mAccessUnit->dataAddress,
-            *pEncoderContext->mAccessUnit->dataAddress,
-            pEncoderContext->mAccessUnit->size,
-            pEncoderContext->mAccessUnit->CTS);
-
-        // Write the AU
-        err = pEncoderContext->mWriterDataInterface->pProcessAU(
-            pEncoderContext->mWriterDataInterface->pWriterContext,
-            pEncoderContext->mAccessUnit->stream->streamID,
-            pEncoderContext->mAccessUnit);
-        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    }
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_processOutputBuffer no error");
-    } else {
-        SAFE_FREE(pEncoderContext->mHeader.pBuf);
-        pEncoderContext->mHeader.Size = 0;
-        ALOGV("VideoEditorVideoEncoder_processOutputBuffer ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_processOutputBuffer end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_encode(M4ENCODER_Context pContext,
-        M4VIFI_ImagePlane* pInPlane, M4OSA_Double Cts,
-        M4ENCODER_FrameMode FrameMode) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    status_t result = OK;
-    MediaBuffer* outputBuffer = NULL;
-
-    ALOGV("VideoEditorVideoEncoder_encode 0x%X %f %d", pInPlane, Cts, FrameMode);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    if ( STARTED == pEncoderContext->mState ) {
-        pEncoderContext->mState = BUFFERING;
-    }
-    VIDEOEDITOR_CHECK(
-        (BUFFERING | READING) & pEncoderContext->mState, M4ERR_STATE);
-
-    pEncoderContext->mNbInputFrames++;
-    if ( 0 > pEncoderContext->mFirstInputCts ) {
-        pEncoderContext->mFirstInputCts = Cts;
-    }
-    pEncoderContext->mLastInputCts = Cts;
-
-    ALOGV("VideoEditorVideoEncoder_encode 0x%X %d %f (%d)", pInPlane, FrameMode,
-        Cts, pEncoderContext->mLastCTS);
-
-    // Push the input buffer to the encoder source
-    err = VideoEditorVideoEncoder_processInputBuffer(pEncoderContext, Cts,
-        M4OSA_FALSE);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-    // Notify the source in case of EOS
-    if ( M4ENCODER_kLastFrame == FrameMode ) {
-        err = VideoEditorVideoEncoder_processInputBuffer(
-            pEncoderContext, 0, M4OSA_TRUE);
-        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-    }
-
-    if ( BUFFERING == pEncoderContext->mState ) {
-        // Prefetch is complete, start reading
-        pEncoderContext->mState = READING;
-    }
-    // Read
-    while (1)  {
-        MediaBuffer *outputBuffer =
-                pEncoderContext->mPuller->getBufferNonBlocking();
-
-        if (outputBuffer == NULL) {
-            int32_t YUVBufferNumber =
-                    pEncoderContext->mEncoderSource->getNumberOfBuffersInQueue();
-            /* Make sure that the configured maximum number of prefetch YUV frames is
-             * not exceeded. This is to limit the amount of memory usage of video editor engine.
-             * The value of maximum prefetch Yuv frames is defined in media_profiles.xml */
-            if ((YUVBufferNumber < pEncoderContext->mMaxPrefetchFrames) ||
-                (pEncoderContext->mPuller->hasMediaSourceReturnedError()
-                    == true)) {
-                break;
-            }
-        } else {
-            // Provide the encoded AU to the writer
-            err = VideoEditorVideoEncoder_processOutputBuffer(pEncoderContext,
-                outputBuffer);
-            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-            pEncoderContext->mPuller->putBuffer(outputBuffer);
-        }
-    }
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_encode no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_encode ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_encode end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_start(M4ENCODER_Context pContext) {
-    M4OSA_ERR                  err             = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    status_t                   result          = OK;
-
-    ALOGV("VideoEditorVideoEncoder_start begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-    VIDEOEDITOR_CHECK(OPENED == pEncoderContext->mState, M4ERR_STATE);
-
-    pEncoderContext->mNbInputFrames  = 0;
-    pEncoderContext->mFirstInputCts  = -1.0;
-    pEncoderContext->mLastInputCts   = -1.0;
-    pEncoderContext->mNbOutputFrames = 0;
-    pEncoderContext->mFirstOutputCts = -1;
-    pEncoderContext->mLastOutputCts  = -1;
-
-    result = pEncoderContext->mEncoder->start();
-    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
-
-    pEncoderContext->mPuller->start();
-
-    // Set the new state
-    pEncoderContext->mState = STARTED;
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_start no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_start ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_start end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_stop(M4ENCODER_Context pContext) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-    MediaBuffer* outputBuffer = NULL;
-    status_t result = OK;
-
-    ALOGV("VideoEditorVideoEncoder_stop begin");
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-
-    // Send EOS again to make sure the source doesn't block.
-    err = VideoEditorVideoEncoder_processInputBuffer(pEncoderContext, 0,
-        M4OSA_TRUE);
-    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-    // Process the remaining buffers if necessary
-    if ( (BUFFERING | READING) & pEncoderContext->mState ) {
-        while (1)  {
-            MediaBuffer *outputBuffer =
-                pEncoderContext->mPuller->getBufferBlocking();
-
-            if (outputBuffer == NULL) break;
-
-            err = VideoEditorVideoEncoder_processOutputBuffer(
-                pEncoderContext, outputBuffer);
-            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
-
-            pEncoderContext->mPuller->putBuffer(outputBuffer);
-        }
-
-        pEncoderContext->mState = STARTED;
-    }
-
-    // Stop the graph module if necessary
-    if ( STARTED == pEncoderContext->mState ) {
-        pEncoderContext->mPuller->stop();
-        pEncoderContext->mEncoder->stop();
-        pEncoderContext->mState = OPENED;
-    }
-
-    if (pEncoderContext->mNbInputFrames != pEncoderContext->mNbOutputFrames) {
-        ALOGW("Some frames were not encoded: input(%d) != output(%d)",
-            pEncoderContext->mNbInputFrames, pEncoderContext->mNbOutputFrames);
-    }
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_stop no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_stop ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_stop end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_regulBitRate(M4ENCODER_Context pContext) {
-    ALOGW("regulBitRate is not implemented");
-    return M4NO_ERROR;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_setOption(M4ENCODER_Context pContext,
-        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorVideoEncoder_setOption start optionID 0x%X", optionID);
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-
-    switch( optionID ) {
-        case M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr:
-            pEncoderContext->mH264NALUPostProcessFct =
-                (H264MCS_ProcessEncodedNALU_fct*)optionValue;
-            break;
-        case M4ENCODER_kOptionID_H264ProcessNALUContext:
-            pEncoderContext->mH264NALUPostProcessCtx =
-                (M4OSA_Context)optionValue;
-            break;
-        default:
-            ALOGV("VideoEditorVideoEncoder_setOption: unsupported optionId 0x%X",
-                optionID);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
-            break;
-    }
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_setOption no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_setOption ERROR 0x%X", err);
-    }
-    ALOGV("VideoEditorVideoEncoder_setOption end");
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_getOption(M4ENCODER_Context pContext,
-        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
-    M4OSA_ERR err = M4NO_ERROR;
-    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
-
-    ALOGV("VideoEditorVideoEncoder_getOption begin optinId 0x%X", optionID);
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
-    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
-
-    switch( optionID ) {
-        case M4ENCODER_kOptionID_EncoderHeader:
-            VIDEOEDITOR_CHECK(
-                    M4OSA_NULL != pEncoderContext->mHeader.pBuf, M4ERR_STATE);
-            *(M4ENCODER_Header**)optionValue = &(pEncoderContext->mHeader);
-            break;
-        default:
-            ALOGV("VideoEditorVideoEncoder_getOption: unsupported optionId 0x%X",
-                optionID);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
-            break;
-    }
-
-cleanUp:
-    if ( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_getOption no error");
-    } else {
-        ALOGV("VideoEditorVideoEncoder_getOption ERROR 0x%X", err);
-    }
-    return err;
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface(M4ENCODER_Format format,
-        M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
-    M4OSA_ERR err = M4NO_ERROR;
-
-    // Input parameters check
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pFormat,           M4ERR_PARAMETER);
-    VIDEOEDITOR_CHECK(M4OSA_NULL != pEncoderInterface, M4ERR_PARAMETER);
-
-    ALOGV("VideoEditorVideoEncoder_getInterface begin 0x%x 0x%x %d", pFormat,
-        pEncoderInterface, mode);
-
-    SAFE_MALLOC(*pEncoderInterface, M4ENCODER_GlobalInterface, 1,
-        "VideoEditorVideoEncoder");
-
-    *pFormat = format;
-
-    switch( format ) {
-        case M4ENCODER_kH263:
-            {
-                (*pEncoderInterface)->pFctInit =
-                    VideoEditorVideoEncoder_init_H263;
-                break;
-            }
-        case M4ENCODER_kMPEG4:
-            {
-                (*pEncoderInterface)->pFctInit =
-                    VideoEditorVideoEncoder_init_MPEG4;
-                break;
-            }
-        case M4ENCODER_kH264:
-            {
-                (*pEncoderInterface)->pFctInit =
-                    VideoEditorVideoEncoder_init_H264;
-                break;
-            }
-        default:
-            ALOGV("VideoEditorVideoEncoder_getInterface : unsupported format %d",
-                format);
-            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
-        break;
-    }
-    (*pEncoderInterface)->pFctOpen         = VideoEditorVideoEncoder_open;
-    (*pEncoderInterface)->pFctStart        = VideoEditorVideoEncoder_start;
-    (*pEncoderInterface)->pFctStop         = VideoEditorVideoEncoder_stop;
-    (*pEncoderInterface)->pFctPause        = M4OSA_NULL;
-    (*pEncoderInterface)->pFctResume       = M4OSA_NULL;
-    (*pEncoderInterface)->pFctClose        = VideoEditorVideoEncoder_close;
-    (*pEncoderInterface)->pFctCleanup      = VideoEditorVideoEncoder_cleanup;
-    (*pEncoderInterface)->pFctRegulBitRate =
-        VideoEditorVideoEncoder_regulBitRate;
-    (*pEncoderInterface)->pFctEncode       = VideoEditorVideoEncoder_encode;
-    (*pEncoderInterface)->pFctSetOption    = VideoEditorVideoEncoder_setOption;
-    (*pEncoderInterface)->pFctGetOption    = VideoEditorVideoEncoder_getOption;
-
-cleanUp:
-    if( M4NO_ERROR == err ) {
-        ALOGV("VideoEditorVideoEncoder_getInterface no error");
-    } else {
-        *pEncoderInterface = M4OSA_NULL;
-        ALOGV("VideoEditorVideoEncoder_getInterface ERROR 0x%X", err);
-    }
-    return err;
-}
-
-extern "C" {
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface_H263(M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
-    return VideoEditorVideoEncoder_getInterface(M4ENCODER_kH263, pFormat,
-            pEncoderInterface, mode);
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface_MPEG4(M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
-    return VideoEditorVideoEncoder_getInterface(M4ENCODER_kMPEG4, pFormat,
-           pEncoderInterface, mode);
-}
-
-M4OSA_ERR VideoEditorVideoEncoder_getInterface_H264(M4ENCODER_Format* pFormat,
-        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
-    return VideoEditorVideoEncoder_getInterface(M4ENCODER_kH264, pFormat,
-           pEncoderInterface, mode);
-
-}
-
-}  // extern "C"
-
-}  // namespace android
diff --git a/libvideoeditor/vss/video_filters/Android.mk b/libvideoeditor/vss/video_filters/Android.mk
deleted file mode 100755
index e2d2111..0000000
--- a/libvideoeditor/vss/video_filters/Android.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-#LOCAL_PATH:= $(call my-dir)
-#include $(CLEAR_VARS)
-
-#include $(call all-makefiles-under,$(LOCAL_PATH))
-include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/video_filters/src/Android.mk b/libvideoeditor/vss/video_filters/src/Android.mk
deleted file mode 100755
index 85a530c..0000000
--- a/libvideoeditor/vss/video_filters/src/Android.mk
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libvideofilters
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE:= libvideoeditor_videofilters
-
-LOCAL_SRC_FILES:=          \
-      M4VIFI_BGR565toYUV420.c \
-      M4VIFI_ResizeRGB888toRGB888.c \
-      M4VIFI_ResizeRGB565toRGB565.c \
-      M4VIFI_Clip.c \
-      M4VIFI_ResizeYUVtoBGR565.c \
-      M4VIFI_RGB888toYUV420.c \
-      M4VIFI_RGB565toYUV420.c \
-      M4VFL_transition.c
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils \
-    libutils \
-    libvideoeditor_osal \
-
-LOCAL_C_INCLUDES += \
-    $(TOP)/frameworks/av/libvideoeditor/osal/inc \
-    $(TOP)/frameworks/av/libvideoeditor/vss/common/inc
-
-LOCAL_SHARED_LIBRARIES += libdl
-
-# All of the shared libraries we link against.
-LOCAL_LDLIBS := \
-    -lpthread -ldl
-
-LOCAL_CFLAGS += -Wno-multichar
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VFL_transition.c b/libvideoeditor/vss/video_filters/src/M4VFL_transition.c
deleted file mode 100755
index 6a5e0b6..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VFL_transition.c
+++ /dev/null
@@ -1,510 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file        M4TRAN_transition.c
- * @brief
- ******************************************************************************
-*/
-
-/**
- * OSAL (memset and memcpy) ***/
-#include "M4OSA_Memory.h"
-
-#include "M4VFL_transition.h"
-
-#include <string.h>
-
-#ifdef LITTLE_ENDIAN
-#define M4VFL_SWAP_SHORT(a) a = ((a & 0xFF) << 8) | ((a & 0xFF00) >> 8)
-#else
-#define M4VFL_SWAP_SHORT(a)
-#endif
-
-#define LUM_FACTOR_MAX 10
-
-
-unsigned char M4VFL_modifyLumaByStep(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
-                                     M4VFL_ModifLumParam *lum_param, void *user_data)
-{
-    unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
-    unsigned long pix_src;
-    unsigned long u_outpx, u_outpx2;
-    unsigned long u_width, u_stride, u_stride_out,u_height, pix;
-    unsigned long lf1, lf2, lf3;
-    long i, j;
-
-    if (lum_param->copy_chroma != 0)
-    {
-        /* copy chroma plane */
-
-    }
-
-    /* apply luma factor */
-    u_width = plane_in[0].u_width;
-    u_height = plane_in[0].u_height;
-    u_stride = (plane_in[0].u_stride >> 1);
-    u_stride_out = (plane_out[0].u_stride >> 1);
-    p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
-    p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
-    p_dest_line = p_dest;
-    p_src_line = p_src;
-
-    switch(lum_param->lum_factor)
-    {
-    case 0:
-        /* very specific case : set luma plane to 16 */
-        for (j = u_height; j != 0; j--)
-        {
-            memset((void *)p_dest,16, u_width);
-            p_dest += u_stride_out;
-        }
-        return 0;
-
-    case 1:
-        /* 0.25 */
-        lf1 = 6; lf2 = 6; lf3 = 7;
-        break;
-    case 2:
-        /* 0.375 */
-        lf1 = 7; lf2 = 7; lf3 = 7;
-        break;
-    case 3:
-        /* 0.5 */
-        lf1 = 7; lf2 = 7; lf3 = 8;
-        break;
-    case 4:
-        /* 0.625 */
-        lf1 = 7; lf2 = 8; lf3 = 8;
-        break;
-    case 5:
-        /* 0.75 */
-        lf1 = 8; lf2 = 8; lf3 = 8;
-        break;
-    case 6:
-        /* 0.875 */
-        lf1 = 9; lf2 = 8; lf3 = 7;
-        break;
-    default:
-        lf1 = 8; lf2 = 8; lf3 = 9;
-        break;
-    }
-
-    for (j = u_height; j != 0; j--)
-    {
-        p_dest = p_dest_line;
-        p_src = p_src_line;
-        for (i = (u_width >> 1); i != 0; i--)
-        {
-            pix_src = (unsigned long) *p_src++;
-            pix = pix_src & 0xFF;
-            u_outpx = (((pix << lf1) + (pix << lf2) + (pix << lf3) ) >> LUM_FACTOR_MAX);
-            pix = ((pix_src & 0xFF00) >> 8);
-            u_outpx2 = ((((pix << lf1) + (pix << lf2) + (pix << lf3) ) >> LUM_FACTOR_MAX)<< 8) ;
-            *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
-        }
-        p_dest_line += u_stride_out;
-        p_src_line += u_stride;
-    }
-    return 0;
-}
-
-
-unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in,
-                                         M4ViComImagePlane *plane_out,
-                                         unsigned long lum_factor,
-                                         void *user_data)
-{
-    unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
-    unsigned char *p_csrc, *p_cdest, *p_csrc_line, *p_cdest_line;
-    unsigned long pix_src;
-    unsigned long u_outpx, u_outpx2;
-    unsigned long u_width, u_stride, u_stride_out,u_height, pix;
-    long i, j;
-
-    /* copy or filter chroma */
-    u_width = plane_in[1].u_width;
-    u_height = plane_in[1].u_height;
-    u_stride = plane_in[1].u_stride;
-    u_stride_out = plane_out[1].u_stride;
-    p_cdest_line = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
-    p_csrc_line = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
-
-    if (lum_factor > 256)
-    {
-        p_cdest = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
-        p_csrc = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
-        /* copy chroma */
-        for (j = u_height; j != 0; j--)
-        {
-            for (i = u_width; i != 0; i--)
-            {
-                memcpy((void *)p_cdest_line, (void *)p_csrc_line, u_width);
-                memcpy((void *)p_cdest,(void *) p_csrc, u_width);
-            }
-            p_cdest_line += u_stride_out;
-            p_cdest += u_stride_out;
-            p_csrc_line += u_stride;
-            p_csrc += u_stride;
-        }
-    }
-    else
-    {
-        /* filter chroma */
-        pix = (1024 - lum_factor) << 7;
-        for (j = u_height; j != 0; j--)
-        {
-            p_cdest = p_cdest_line;
-            p_csrc = p_csrc_line;
-            for (i = u_width; i != 0; i--)
-            {
-                *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
-            }
-            p_cdest_line += u_stride_out;
-            p_csrc_line += u_stride;
-        }
-        p_cdest_line = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
-        p_csrc_line = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
-        for (j = u_height; j != 0; j--)
-        {
-            p_cdest = p_cdest_line;
-            p_csrc = p_csrc_line;
-            for (i = u_width; i != 0; i--)
-            {
-                *p_cdest++ = ((pix + (*p_csrc & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
-            }
-            p_cdest_line += u_stride_out;
-            p_csrc_line += u_stride;
-        }
-    }
-    /* apply luma factor */
-    u_width = plane_in[0].u_width;
-    u_height = plane_in[0].u_height;
-    u_stride = (plane_in[0].u_stride >> 1);
-    u_stride_out = (plane_out[0].u_stride >> 1);
-    p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
-    p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
-    p_dest_line = p_dest;
-    p_src_line = p_src;
-
-    for (j = u_height; j != 0; j--)
-    {
-        p_dest = p_dest_line;
-        p_src = p_src_line;
-        for (i = (u_width >> 1); i != 0; i--)
-        {
-            pix_src = (unsigned long) *p_src++;
-            pix = pix_src & 0xFF;
-            u_outpx = ((pix * lum_factor) >> LUM_FACTOR_MAX);
-            pix = ((pix_src & 0xFF00) >> 8);
-            u_outpx2 = (((pix * lum_factor) >> LUM_FACTOR_MAX)<< 8) ;
-            *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
-        }
-        p_dest_line += u_stride_out;
-        p_src_line += u_stride;
-    }
-
-    return 0;
-}
-
-/**
- *************************************************************************************************
- * M4OSA_ERR M4VIFI_ImageBlendingonYUV420 (void *pUserData,
- *                                                  M4VIFI_ImagePlane *pPlaneIn1,
- *                                                  M4VIFI_ImagePlane *pPlaneIn2,
- *                                                  M4VIFI_ImagePlane *pPlaneOut,
- *                                                  UInt32 Progress)
- * @brief   Blends two YUV 4:2:0 Planar images.
- * @note    Blends YUV420 planar images,
- *          Map the value of progress from (0 - 1000) to (0 - 1024)
- *          Set the range of blendingfactor,
- *                  1. from 0 to (Progress << 1)            ;for Progress <= 512
- *                  2. from (( Progress - 512)<< 1) to 1024 ;otherwise
- *          Set the increment of blendingfactor for each element in the image row by the factor,
- *                  =  (Range-1) / (image width-1)  ;for width >= range
- *                  =  (Range) / (image width)      ;otherwise
- *          Loop on each(= i) row of output Y plane (steps of 2)
- *              Loop on each(= j) column of output Y plane (steps of 2)
- *                  Get four Y samples and one U & V sample from two input YUV4:2:0 images and
- *                  Compute four Y sample and one U & V sample for output YUV4:2:0 image
- *                      using the following,
- *                  Out(i,j) = blendingfactor(i,j) * In1(i,j)+ (l - blendingfactor(i,j)) *In2(i,j)
- *              end loop column
- *          end loop row.
- * @param   pUserData: (IN)  User Specific Parameter
- * @param   pPlaneIn1: (IN)  Pointer to an array of image plane structures maintained
- *           for Y, U and V planes.
- * @param   pPlaneIn2: (IN)  Pointer to an array of image plane structures maintained
- *           for Y, U and V planes.
- * @param   pPlaneOut: (OUT) Pointer to an array of image plane structures maintained
- *           for Y, U and V planes.
- * @param   Progress:  (IN)  Progress value (varies between 0 and 1000)
- * @return  M4VIFI_OK: No error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
- *************************************************************************************************
-*/
-
-/** Check for value is EVEN */
-#ifndef IS_EVEN
-#define IS_EVEN(a)  (!(a & 0x01))
-#endif
-
-/** Used for fixed point implementation */
-#ifndef MAX_SHORT
-#define MAX_SHORT   0x10000
-#endif
-
-#ifndef NULL
-#define NULL    0
-#endif
-
-#ifndef FALSE
-#define FALSE   0
-#define TRUE    !FALSE
-#endif
-
-unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData,
-                                            M4ViComImagePlane *pPlaneIn1,
-                                            M4ViComImagePlane *pPlaneIn2,
-                                            M4ViComImagePlane *pPlaneOut,
-                                            UInt32 Progress)
-{
-    UInt8    *pu8_data_Y_start1,*pu8_data_U_start1,*pu8_data_V_start1;
-    UInt8    *pu8_data_Y_start2,*pu8_data_U_start2,*pu8_data_V_start2;
-    UInt8    *pu8_data_Y_start3,*pu8_data_U_start3,*pu8_data_V_start3;
-    UInt8    *pu8_data_Y_current1, *pu8_data_Y_next1, *pu8_data_U1, *pu8_data_V1;
-    UInt8    *pu8_data_Y_current2, *pu8_data_Y_next2, *pu8_data_U2, *pu8_data_V2;
-    UInt8    *pu8_data_Y_current3,*pu8_data_Y_next3, *pu8_data_U3, *pu8_data_V3;
-    UInt32   u32_stride_Y1, u32_stride2_Y1, u32_stride_U1, u32_stride_V1;
-    UInt32   u32_stride_Y2, u32_stride2_Y2, u32_stride_U2, u32_stride_V2;
-    UInt32   u32_stride_Y3, u32_stride2_Y3, u32_stride_U3, u32_stride_V3;
-    UInt32   u32_height,  u32_width;
-    UInt32   u32_blendfactor, u32_startA, u32_endA, u32_blend_inc, u32_x_accum;
-    UInt32   u32_col, u32_row, u32_rangeA, u32_progress;
-    UInt32   u32_U1,u32_V1,u32_U2,u32_V2, u32_Y1, u32_Y2;
-
-
-    /* Check the Y plane height is EVEN and image plane heights are same */
-    if( (IS_EVEN(pPlaneIn1[0].u_height) == FALSE)                ||
-        (IS_EVEN(pPlaneIn2[0].u_height) == FALSE)                ||
-        (IS_EVEN(pPlaneOut[0].u_height) == FALSE)                ||
-        (pPlaneIn1[0].u_height != pPlaneOut[0].u_height)         ||
-        (pPlaneIn2[0].u_height != pPlaneOut[0].u_height) )
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    /* Check the Y plane width is EVEN and image plane widths are same */
-    if( (IS_EVEN(pPlaneIn1[0].u_width) == FALSE)                 ||
-        (IS_EVEN(pPlaneIn2[0].u_width) == FALSE)                 ||
-        (IS_EVEN(pPlaneOut[0].u_width) == FALSE)                 ||
-        (pPlaneIn1[0].u_width  != pPlaneOut[0].u_width)          ||
-        (pPlaneIn2[0].u_width  != pPlaneOut[0].u_width)  )
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Set the pointer to the beginning of the input1 YUV420 image planes */
-    pu8_data_Y_start1 = pPlaneIn1[0].pac_data + pPlaneIn1[0].u_topleft;
-    pu8_data_U_start1 = pPlaneIn1[1].pac_data + pPlaneIn1[1].u_topleft;
-    pu8_data_V_start1 = pPlaneIn1[2].pac_data + pPlaneIn1[2].u_topleft;
-
-    /* Set the pointer to the beginning of the input2 YUV420 image planes */
-    pu8_data_Y_start2 = pPlaneIn2[0].pac_data + pPlaneIn2[0].u_topleft;
-    pu8_data_U_start2 = pPlaneIn2[1].pac_data + pPlaneIn2[1].u_topleft;
-    pu8_data_V_start2 = pPlaneIn2[2].pac_data + pPlaneIn2[2].u_topleft;
-
-    /* Set the pointer to the beginning of the output YUV420 image planes */
-    pu8_data_Y_start3 = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
-    pu8_data_U_start3 = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
-    pu8_data_V_start3 = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
-
-    /* Set the stride for the next row in each input1 YUV420 plane */
-    u32_stride_Y1 = pPlaneIn1[0].u_stride;
-    u32_stride_U1 = pPlaneIn1[1].u_stride;
-    u32_stride_V1 = pPlaneIn1[2].u_stride;
-
-    /* Set the stride for the next row in each input2 YUV420 plane */
-    u32_stride_Y2 = pPlaneIn2[0].u_stride;
-    u32_stride_U2 = pPlaneIn2[1].u_stride;
-    u32_stride_V2 = pPlaneIn2[2].u_stride;
-
-    /* Set the stride for the next row in each output YUV420 plane */
-    u32_stride_Y3 = pPlaneOut[0].u_stride;
-    u32_stride_U3 = pPlaneOut[1].u_stride;
-    u32_stride_V3 = pPlaneOut[2].u_stride;
-
-    u32_stride2_Y1   = u32_stride_Y1 << 1;
-    u32_stride2_Y2   = u32_stride_Y2 << 1;
-    u32_stride2_Y3   = u32_stride_Y3 << 1;
-
-    /* Get the size of the output image */
-    u32_height = pPlaneOut[0].u_height;
-    u32_width  = pPlaneOut[0].u_width;
-
-    /* User Specified Progress value */
-    u32_progress = Progress;
-
-    /* Map Progress value from (0 - 1000) to (0 - 1024) -> for optimisation */
-    if(u32_progress < 1000)
-        u32_progress = ((u32_progress << 10) / 1000);
-    else
-        u32_progress = 1024;
-
-    /* Set the range of blendingfactor */
-    if(u32_progress <= 512)
-    {
-        u32_startA = 0;
-        u32_endA   = (u32_progress << 1);
-    }
-    else /* u32_progress > 512 */
-    {
-        u32_startA = (u32_progress - 512) << 1;
-        u32_endA   =  1024;
-    }
-    u32_rangeA = u32_endA - u32_startA;
-
-    /* Set the increment of blendingfactor for each element in the image row */
-    if ((u32_width >= u32_rangeA) && (u32_rangeA > 0) )
-    {
-        u32_blend_inc   = ((u32_rangeA-1) * MAX_SHORT) / (u32_width - 1);
-    }
-    else /* (u32_width < u32_rangeA) || (u32_rangeA < 0) */
-    {
-        u32_blend_inc   = (u32_rangeA * MAX_SHORT) / (u32_width);
-    }
-
-    /* Two YUV420 rows are computed at each pass */
-    for (u32_row = u32_height; u32_row != 0; u32_row -=2)
-    {
-        /* Set pointers to the beginning of the row for each input image1 plane */
-        pu8_data_Y_current1 = pu8_data_Y_start1;
-        pu8_data_U1 = pu8_data_U_start1;
-        pu8_data_V1 = pu8_data_V_start1;
-
-        /* Set pointers to the beginning of the row for each input image2 plane */
-        pu8_data_Y_current2 = pu8_data_Y_start2;
-        pu8_data_U2 = pu8_data_U_start2;
-        pu8_data_V2 = pu8_data_V_start2;
-
-        /* Set pointers to the beginning of the row for each output image plane */
-        pu8_data_Y_current3 = pu8_data_Y_start3;
-        pu8_data_U3 = pu8_data_U_start3;
-        pu8_data_V3 = pu8_data_V_start3;
-
-        /* Set pointers to the beginning of the next row for image luma plane */
-        pu8_data_Y_next1 = pu8_data_Y_current1 + u32_stride_Y1;
-        pu8_data_Y_next2 = pu8_data_Y_current2 + u32_stride_Y2;
-        pu8_data_Y_next3 = pu8_data_Y_current3 + u32_stride_Y3;
-
-        /* Initialise blendfactor */
-        u32_blendfactor   = u32_startA;
-        /* Blendfactor Increment accumulator */
-        u32_x_accum = 0;
-
-        /* Loop on each column of the output image */
-        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* Update the blending factor */
-            u32_blendfactor = u32_startA + (u32_x_accum >> 16);
-
-            /* Get Luma value (x,y) of input Image1 */
-            u32_Y1 = *pu8_data_Y_current1++;
-
-            /* Get chrominance2 value */
-            u32_U1 = *pu8_data_U1++;
-            u32_V1 = *pu8_data_V1++;
-
-            /* Get Luma value (x,y) of input Image2 */
-            u32_Y2 = *pu8_data_Y_current2++;
-
-            /* Get chrominance2 value */
-            u32_U2 = *pu8_data_U2++;
-            u32_V2 = *pu8_data_V2++;
-
-            /* Compute Luma value (x,y) of Output image */
-            *pu8_data_Y_current3++  = (UInt8)((u32_blendfactor * u32_Y2 +
-                                                     (1024 - u32_blendfactor)*u32_Y1) >> 10);
-            /* Compute chroma(U) value of Output image */
-            *pu8_data_U3++          = (UInt8)((u32_blendfactor * u32_U2 +
-                                                     (1024 - u32_blendfactor)*u32_U1) >> 10);
-            /* Compute chroma(V) value of Output image */
-            *pu8_data_V3++          = (UInt8)((u32_blendfactor * u32_V2 +
-                                                     (1024 - u32_blendfactor)*u32_V1) >> 10);
-
-            /* Get Luma value (x,y+1) of input Image1 */
-            u32_Y1 = *pu8_data_Y_next1++;
-
-             /* Get Luma value (x,y+1) of input Image2 */
-            u32_Y2 = *pu8_data_Y_next2++;
-
-            /* Compute Luma value (x,y+1) of Output image*/
-            *pu8_data_Y_next3++ = (UInt8)((u32_blendfactor * u32_Y2 +
-                                                    (1024 - u32_blendfactor)*u32_Y1) >> 10);
-            /* Update accumulator */
-            u32_x_accum += u32_blend_inc;
-
-            /* Update the blending factor */
-            u32_blendfactor = u32_startA + (u32_x_accum >> 16);
-
-            /* Get Luma value (x+1,y) of input Image1 */
-            u32_Y1 = *pu8_data_Y_current1++;
-
-            /* Get Luma value (x+1,y) of input Image2 */
-            u32_Y2 = *pu8_data_Y_current2++;
-
-            /* Compute Luma value (x+1,y) of Output image*/
-            *pu8_data_Y_current3++ = (UInt8)((u32_blendfactor * u32_Y2 +
-                                                 (1024 - u32_blendfactor)*u32_Y1) >> 10);
-
-            /* Get Luma value (x+1,y+1) of input Image1 */
-            u32_Y1 = *pu8_data_Y_next1++;
-
-            /* Get Luma value (x+1,y+1) of input Image2 */
-            u32_Y2 = *pu8_data_Y_next2++;
-
-            /* Compute Luma value (x+1,y+1) of Output image*/
-            *pu8_data_Y_next3++ = (UInt8)((u32_blendfactor * u32_Y2 +
-                                                 (1024 - u32_blendfactor)*u32_Y1) >> 10);
-            /* Update accumulator */
-            u32_x_accum += u32_blend_inc;
-
-            /* Working pointers are incremented just after each storage */
-
-        }/* End of row scanning */
-
-        /* Update working pointer of input image1 for next row */
-        pu8_data_Y_start1 += u32_stride2_Y1;
-        pu8_data_U_start1 += u32_stride_U1;
-        pu8_data_V_start1 += u32_stride_V1;
-
-        /* Update working pointer of input image2 for next row */
-        pu8_data_Y_start2 += u32_stride2_Y2;
-        pu8_data_U_start2 += u32_stride_U2;
-        pu8_data_V_start2 += u32_stride_V2;
-
-        /* Update working pointer of output image for next row */
-        pu8_data_Y_start3 += u32_stride2_Y3;
-        pu8_data_U_start3 += u32_stride_U3;
-        pu8_data_V_start3 += u32_stride_V3;
-
-    }/* End of column scanning */
-
-    return M4VIFI_OK;
-}
-/* End of file M4VIFI_ImageBlendingonYUV420.c */
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c
deleted file mode 100755
index c608767..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ****************************************************************************************
- * @file     M4VIFI_BGR565toYUV420.c
- * @brief    Contain video library function
- * @note     Color Conversion Filter
- *           -# Contains the format conversion filters from BGR565 to YUV420
- ****************************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-/**
- *****************************************************************************************
- * M4VIFI_UInt8 M4VIFI_BGR565toYUV420 (void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
- *                                                     M4VIFI_ImagePlane *pPlaneOut)
- * @brief   Transform BGR565 image to a YUV420 image.
- * @note    Convert BGR565 to YUV420,
- *          Loop on each row ( 2 rows by 2 rows )
- *              Loop on each column ( 2 col by 2 col )
- *                  Get 4 BGR samples from input data and build 4 output Y samples
- *                  and each single U & V data
- *              end loop on col
- *          end loop on row
- * @param   pUserData: (IN) User Specific Data
- * @param   pPlaneIn: (IN) Pointer to BGR565 Plane
- * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
- *****************************************************************************************
-*/
-
-M4VIFI_UInt8    M4VIFI_BGR565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-                                                      M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
-    M4VIFI_UInt32   u32_stride_bgr, u32_stride_2bgr;
-    M4VIFI_UInt32   u32_col, u32_row;
-
-    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
-    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
-    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
-    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
-    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
-    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
-    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
-    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
-    M4VIFI_UInt8    *pu8_bgrn_data, *pu8_bgrn;
-    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
-
-    /* Check planes height are appropriate */
-    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
-        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
-        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    /* Check planes width are appropriate */
-    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
-        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
-        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Set the pointer to the beginning of the output data buffers */
-    pu8_y_data  = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
-    pu8_u_data  = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
-    pu8_v_data  = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
-
-    /* Set the pointer to the beginning of the input data buffers */
-    pu8_bgrn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
-
-    /* Get the size of the output image */
-    u32_width   = pPlaneOut[0].u_width;
-    u32_height  = pPlaneOut[0].u_height;
-
-    /* Set the size of the memory jumps corresponding to row jump in each output plane */
-    u32_stride_Y = pPlaneOut[0].u_stride;
-    u32_stride2_Y = u32_stride_Y << 1;
-    u32_stride_U = pPlaneOut[1].u_stride;
-    u32_stride_V = pPlaneOut[2].u_stride;
-
-    /* Set the size of the memory jumps corresponding to row jump in input plane */
-    u32_stride_bgr = pPlaneIn->u_stride;
-    u32_stride_2bgr = u32_stride_bgr << 1;
-
-    /* Loop on each row of the output image, input coordinates are estimated from output ones */
-    /* Two YUV rows are computed at each pass */
-    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
-    {
-        /* Current Y plane row pointers */
-        pu8_yn = pu8_y_data;
-        /* Next Y plane row pointers */
-        pu8_ys = pu8_yn + u32_stride_Y;
-        /* Current U plane row pointer */
-        pu8_u = pu8_u_data;
-        /* Current V plane row pointer */
-        pu8_v = pu8_v_data;
-
-        pu8_bgrn = pu8_bgrn_data;
-
-        /* Loop on each column of the output image */
-        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* Get four BGR 565 samples from input data */
-            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_bgrn);
-            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_bgrn + CST_RGB_16_SIZE));
-            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_bgrn + u32_stride_bgr));
-            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_bgrn + u32_stride_bgr + CST_RGB_16_SIZE));
-            /* Unpack RGB565 to 8bit R, G, B */
-            /* (x,y) */
-            GET_BGR565(i32_b00, i32_g00, i32_r00, u16_pix1);
-            /* (x+1,y) */
-            GET_BGR565(i32_b10, i32_g10, i32_r10, u16_pix2);
-            /* (x,y+1) */
-            GET_BGR565(i32_b01, i32_g01, i32_r01, u16_pix3);
-            /* (x+1,y+1) */
-            GET_BGR565(i32_b11, i32_g11, i32_r11, u16_pix4);
-
-            /* Convert BGR value to YUV */
-            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
-            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
-            /* luminance value */
-            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
-
-            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
-            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
-            /* luminance value */
-            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
-
-            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
-            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
-            /* luminance value */
-            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
-
-            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
-            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
-            /* luminance value */
-            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
-
-            /* Store luminance data */
-            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
-            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
-            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
-            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
-
-            /* Store chroma data */
-            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
-            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
-
-            /* Prepare for next column */
-            pu8_bgrn += (CST_RGB_16_SIZE<<1);
-            /* Update current Y plane line pointer*/
-            pu8_yn += 2;
-            /* Update next Y plane line pointer*/
-            pu8_ys += 2;
-            /* Update U plane line pointer*/
-            pu8_u ++;
-            /* Update V plane line pointer*/
-            pu8_v ++;
-        } /* End of horizontal scanning */
-
-        /* Prepare pointers for the next row */
-        pu8_y_data      += u32_stride2_Y;
-        pu8_u_data      += u32_stride_U;
-        pu8_v_data      += u32_stride_V;
-        pu8_bgrn_data   += u32_stride_2bgr;
-
-    } /* End of vertical scanning */
-
-    return M4VIFI_OK;
-}
-/* End of file M4VIFI_BGR565toYUV420.c */
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c b/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c
deleted file mode 100755
index e4290b1..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file     M4VIFI_Clip.c
- * @brief    Management of the RGB Clipping matrix inclusion and Division Table
- * @note     -# Clipping Matrix is used in order to properly manage the inclusion of
- *           the external RGB Clipping matrix used for color conversion.
- *           This file HAS TO BE compiled with all color conversion filters project
- *           -# Division table is used in RGB to HLS color conversion
- *           Important: This file must be compiled during the assembly library building
- ******************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-
-CNST M4VIFI_UInt8   M4VIFI_ClipTable[1256]
-= {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
-0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
-0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
-0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
-0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
-0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
-0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
-0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43,
-0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
-0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
-0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b,
-0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
-0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
-0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
-0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
-0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
-0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b,
-0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,
-0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
-0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3,
-0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
-0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
-0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
-0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3,
-0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
-0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3,
-0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb,
-0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
-0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb,
-0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3,
-0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
-0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-};
-
-/* Division table for ( 65535/x ); x = 0 to 512 */
-CNST M4VIFI_UInt16  M4VIFI_DivTable[512]
-= {
-0, 65535, 32768, 21845, 16384, 13107, 10922, 9362,
-8192, 7281, 6553, 5957, 5461, 5041, 4681, 4369,
-4096, 3855, 3640, 3449, 3276, 3120, 2978, 2849,
-2730, 2621, 2520, 2427, 2340, 2259, 2184, 2114,
-2048, 1985, 1927, 1872, 1820, 1771, 1724, 1680,
-1638, 1598, 1560, 1524, 1489, 1456, 1424, 1394,
-1365, 1337, 1310, 1285, 1260, 1236, 1213, 1191,
-1170, 1149, 1129, 1110, 1092, 1074, 1057, 1040,
-1024, 1008, 992, 978, 963, 949, 936, 923,
-910, 897, 885, 873, 862, 851, 840, 829,
-819, 809, 799, 789, 780, 771, 762, 753,
-744, 736, 728, 720, 712, 704, 697, 689,
-682, 675, 668, 661, 655, 648, 642, 636,
-630, 624, 618, 612, 606, 601, 595, 590,
-585, 579, 574, 569, 564, 560, 555, 550,
-546, 541, 537, 532, 528, 524, 520, 516,
-512, 508, 504, 500, 496, 492, 489, 485,
-481, 478, 474, 471, 468, 464, 461, 458,
-455, 451, 448, 445, 442, 439, 436, 434,
-431, 428, 425, 422, 420, 417, 414, 412,
-409, 407, 404, 402, 399, 397, 394, 392,
-390, 387, 385, 383, 381, 378, 376, 374,
-372, 370, 368, 366, 364, 362, 360, 358,
-356, 354, 352, 350, 348, 346, 344, 343,
-341, 339, 337, 336, 334, 332, 330, 329,
-327, 326, 324, 322, 321, 319, 318, 316,
-315, 313, 312, 310, 309, 307, 306, 304,
-303, 302, 300, 299, 297, 296, 295, 293,
-292, 291, 289, 288, 287, 286, 284, 283,
-282, 281, 280, 278, 277, 276, 275, 274,
-273, 271, 270, 269, 268, 267, 266, 265,
-264, 263, 262, 261, 260, 259, 258, 257,
-256, 255, 254, 253, 252, 251, 250, 249,
-248, 247, 246, 245, 244, 243, 242, 241,
-240, 240, 239, 238, 237, 236, 235, 234,
-234, 233, 232, 231, 230, 229, 229, 228,
-227, 226, 225, 225, 224, 223, 222, 222,
-221, 220, 219, 219, 218, 217, 217, 216,
-215, 214, 214, 213, 212, 212, 211, 210,
-210, 209, 208, 208, 207, 206, 206, 205,
-204, 204, 203, 202, 202, 201, 201, 200,
-199, 199, 198, 197, 197, 196, 196, 195,
-195, 194, 193, 193, 192, 192, 191, 191,
-190, 189, 189, 188, 188, 187, 187, 186,
-186, 185, 185, 184, 184, 183, 183, 182,
-182, 181, 181, 180, 180, 179, 179, 178,
-178, 177, 177, 176, 176, 175, 175, 174,
-174, 173, 173, 172, 172, 172, 171, 171,
-170, 170, 169, 169, 168, 168, 168, 167,
-167, 166, 166, 165, 165, 165, 164, 164,
-163, 163, 163, 162, 162, 161, 161, 161,
-160, 160, 159, 159, 159, 158, 158, 157,
-157, 157, 156, 156, 156, 155, 155, 154,
-154, 154, 153, 153, 153, 152, 152, 152,
-151, 151, 151, 150, 150, 149, 149, 149,
-148, 148, 148, 147, 147, 147, 146, 146,
-146, 145, 145, 145, 144, 144, 144, 144,
-143, 143, 143, 142, 142, 142, 141, 141,
-141, 140, 140, 140, 140, 139, 139, 139,
-138, 138, 138, 137, 137, 137, 137, 136,
-136, 136, 135, 135, 135, 135, 134, 134,
-134, 134, 133, 133, 133, 132, 132, 132,
-132, 131, 131, 131, 131, 130, 130, 130,
-130, 129, 129, 129, 129, 128, 128, 128
-};
-
-CNST M4VIFI_Int32  const_storage1[8]
-= {
-0x00002568, 0x00003343,0x00000649,0x00000d0f, 0x0000D86C, 0x0000D83B, 0x00010000, 0x00010000
-};
-
-CNST M4VIFI_Int32  const_storage[8]
-= {
-0x00002568, 0x00003343, 0x1BF800, 0x00000649, 0x00000d0f, 0x110180, 0x40cf, 0x22BE00
-};
-
-
-CNST M4VIFI_UInt16  *M4VIFI_DivTable_zero
- = &M4VIFI_DivTable[0];
-
-CNST M4VIFI_UInt8   *M4VIFI_ClipTable_zero
- = &M4VIFI_ClipTable[500];
-
-
-/* End of file M4VIFI_Clip.c */
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c
deleted file mode 100755
index 34cbd57..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @brief    Contain video library function
- * @note     Color Conversion Filter
- *           Contains the format conversion filters from RGB565 to YUV420
- ******************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-
-/**
- ******************************************************************************
- * M4VIFI_UInt8 M4VIFI_RGB565toYUV420 (void *pUserData,
- *                                   M4VIFI_ImagePlane *pPlaneIn,
- *                                   M4VIFI_ImagePlane *pPlaneOut)
- * @brief   transform RGB565 image to a YUV420 image.
- * @note    Convert RGB565 to YUV420,
- *          Loop on each row ( 2 rows by 2 rows )
- *              Loop on each column ( 2 col by 2 col )
- *                  Get 4 RGB samples from input data and build 4 output Y samples
- *                  and each single U & V data
- *              end loop on col
- *          end loop on row
- * @param   pUserData: (IN) User Specific Data
- * @param   pPlaneIn: (IN) Pointer to RGB565 Plane
- * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
- ******************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
-                                                      M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
-    M4VIFI_UInt32   u32_stride_rgb, u32_stride_2rgb;
-    M4VIFI_UInt32   u32_col, u32_row;
-
-    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
-    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
-    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
-    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
-    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
-    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
-    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
-    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
-    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
-    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
-
-    /* Check planes height are appropriate */
-    if ((pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
-        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
-        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    /* Check planes width are appropriate */
-    if ((pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
-        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
-        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Set the pointer to the beginning of the output data buffers */
-    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
-    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
-    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
-
-    /* Set the pointer to the beginning of the input data buffers */
-    pu8_rgbn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
-
-    /* Get the size of the output image */
-    u32_width = pPlaneOut[0].u_width;
-    u32_height = pPlaneOut[0].u_height;
-
-    /* Set the size of the memory jumps corresponding to row jump in each output plane */
-    u32_stride_Y = pPlaneOut[0].u_stride;
-    u32_stride2_Y = u32_stride_Y << 1;
-    u32_stride_U = pPlaneOut[1].u_stride;
-    u32_stride_V = pPlaneOut[2].u_stride;
-
-    /* Set the size of the memory jumps corresponding to row jump in input plane */
-    u32_stride_rgb = pPlaneIn->u_stride;
-    u32_stride_2rgb = u32_stride_rgb << 1;
-
-
-    /* Loop on each row of the output image, input coordinates are estimated from output ones */
-    /* Two YUV rows are computed at each pass */
-    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
-    {
-        /* Current Y plane row pointers */
-        pu8_yn = pu8_y_data;
-        /* Next Y plane row pointers */
-        pu8_ys = pu8_yn + u32_stride_Y;
-        /* Current U plane row pointer */
-        pu8_u = pu8_u_data;
-        /* Current V plane row pointer */
-        pu8_v = pu8_v_data;
-
-        pu8_rgbn = pu8_rgbn_data;
-
-        /* Loop on each column of the output image */
-        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* Get four RGB 565 samples from input data */
-            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
-            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
-            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
-            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
-
-            /* Unpack RGB565 to 8bit R, G, B */
-            /* (x,y) */
-            GET_RGB565(i32_r00,i32_g00,i32_b00,u16_pix1);
-            /* (x+1,y) */
-            GET_RGB565(i32_r10,i32_g10,i32_b10,u16_pix2);
-            /* (x,y+1) */
-            GET_RGB565(i32_r01,i32_g01,i32_b01,u16_pix3);
-            /* (x+1,y+1) */
-            GET_RGB565(i32_r11,i32_g11,i32_b11,u16_pix4);
-
-            /* Convert RGB value to YUV */
-            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
-            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
-            /* luminance value */
-            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
-
-            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
-            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
-            /* luminance value */
-            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
-
-            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
-            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
-            /* luminance value */
-            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
-
-            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
-            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
-            /* luminance value */
-            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
-
-            /* Store luminance data */
-            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
-            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
-            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
-            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
-
-            /* Store chroma data */
-            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
-            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
-
-            /* Prepare for next column */
-            pu8_rgbn += (CST_RGB_16_SIZE<<1);
-            /* Update current Y plane line pointer*/
-            pu8_yn += 2;
-            /* Update next Y plane line pointer*/
-            pu8_ys += 2;
-            /* Update U plane line pointer*/
-            pu8_u ++;
-            /* Update V plane line pointer*/
-            pu8_v ++;
-        } /* End of horizontal scanning */
-
-        /* Prepare pointers for the next row */
-        pu8_y_data += u32_stride2_Y;
-        pu8_u_data += u32_stride_U;
-        pu8_v_data += u32_stride_V;
-        pu8_rgbn_data += u32_stride_2rgb;
-
-
-    } /* End of vertical scanning */
-
-    return M4VIFI_OK;
-}
-
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c
deleted file mode 100755
index 285a2a6..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include    "M4VIFI_FiltersAPI.h"
-
-#include    "M4VIFI_Defines.h"
-
-#include    "M4VIFI_Clip.h"
-
-/***************************************************************************
-Proto:
-M4VIFI_UInt8    M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn,
-                                     M4VIFI_ImagePlane PlaneOut[3]);
-Purpose:    filling of the YUV420 plane from a BGR24 plane
-Abstract:    Loop on each row ( 2 rows by 2 rows )
-                Loop on each column ( 2 col by 2 col )
-                    Get 4 BGR samples from input data and build 4 output Y samples and
-                    each single U & V data
-                end loop on col
-            end loop on row
-
-In:            RGB24 plane
-InOut:        none
-Out:        array of 3 M4VIFI_ImagePlane structures
-Modified:    ML: RGB function modified to BGR.
-***************************************************************************/
-M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn,
-                                    M4VIFI_ImagePlane PlaneOut[3])
-{
-    M4VIFI_UInt32    u32_width, u32_height;
-    M4VIFI_UInt32    u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V, u32_stride_rgb,\
-                     u32_stride_2rgb;
-    M4VIFI_UInt32    u32_col, u32_row;
-
-    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
-    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
-    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
-    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
-    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
-    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
-    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
-    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
-    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
-
-    /* check sizes */
-    if( (PlaneIn->u_height != PlaneOut[0].u_height)            ||
-        (PlaneOut[0].u_height != (PlaneOut[1].u_height<<1))    ||
-        (PlaneOut[0].u_height != (PlaneOut[2].u_height<<1)))
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-
-    if( (PlaneIn->u_width != PlaneOut[0].u_width)        ||
-        (PlaneOut[0].u_width != (PlaneOut[1].u_width<<1))    ||
-        (PlaneOut[0].u_width != (PlaneOut[2].u_width<<1)))
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-
-
-    /* set the pointer to the beginning of the output data buffers */
-    pu8_y_data    = PlaneOut[0].pac_data + PlaneOut[0].u_topleft;
-    pu8_u_data    = PlaneOut[1].pac_data + PlaneOut[1].u_topleft;
-    pu8_v_data    = PlaneOut[2].pac_data + PlaneOut[2].u_topleft;
-
-    /* idem for input buffer */
-    pu8_rgbn_data    = PlaneIn->pac_data + PlaneIn->u_topleft;
-
-    /* get the size of the output image */
-    u32_width    = PlaneOut[0].u_width;
-    u32_height    = PlaneOut[0].u_height;
-
-    /* set the size of the memory jumps corresponding to row jump in each output plane */
-    u32_stride_Y = PlaneOut[0].u_stride;
-    u32_stride2_Y= u32_stride_Y << 1;
-    u32_stride_U = PlaneOut[1].u_stride;
-    u32_stride_V = PlaneOut[2].u_stride;
-
-    /* idem for input plane */
-    u32_stride_rgb = PlaneIn->u_stride;
-    u32_stride_2rgb = u32_stride_rgb << 1;
-
-    /* loop on each row of the output image, input coordinates are estimated from output ones */
-    /* two YUV rows are computed at each pass */
-    for    (u32_row = u32_height ;u32_row != 0; u32_row -=2)
-    {
-        /* update working pointers */
-        pu8_yn    = pu8_y_data;
-        pu8_ys    = pu8_yn + u32_stride_Y;
-
-        pu8_u    = pu8_u_data;
-        pu8_v    = pu8_v_data;
-
-        pu8_rgbn= pu8_rgbn_data;
-
-        /* loop on each column of the output image*/
-        for    (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
-        {
-            /* get RGB samples of 4 pixels */
-            GET_RGB24(i32_r00, i32_g00, i32_b00, pu8_rgbn, 0);
-            GET_RGB24(i32_r10, i32_g10, i32_b10, pu8_rgbn, CST_RGB_24_SIZE);
-            GET_RGB24(i32_r01, i32_g01, i32_b01, pu8_rgbn, u32_stride_rgb);
-            GET_RGB24(i32_r11, i32_g11, i32_b11, pu8_rgbn, u32_stride_rgb + CST_RGB_24_SIZE);
-
-            i32_u00    = U24(i32_r00, i32_g00, i32_b00);
-            i32_v00    = V24(i32_r00, i32_g00, i32_b00);
-            i32_y00    = Y24(i32_r00, i32_g00, i32_b00);        /* matrix luminance */
-            pu8_yn[0]= (M4VIFI_UInt8)i32_y00;
-
-            i32_u10    = U24(i32_r10, i32_g10, i32_b10);
-            i32_v10    = V24(i32_r10, i32_g10, i32_b10);
-            i32_y10    = Y24(i32_r10, i32_g10, i32_b10);
-            pu8_yn[1]= (M4VIFI_UInt8)i32_y10;
-
-            i32_u01    = U24(i32_r01, i32_g01, i32_b01);
-            i32_v01    = V24(i32_r01, i32_g01, i32_b01);
-            i32_y01    = Y24(i32_r01, i32_g01, i32_b01);
-            pu8_ys[0]= (M4VIFI_UInt8)i32_y01;
-
-            i32_u11    = U24(i32_r11, i32_g11, i32_b11);
-            i32_v11    = V24(i32_r11, i32_g11, i32_b11);
-            i32_y11    = Y24(i32_r11, i32_g11, i32_b11);
-            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
-
-            *pu8_u    = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
-            *pu8_v    = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
-
-            pu8_rgbn    +=  (CST_RGB_24_SIZE<<1);
-            pu8_yn        += 2;
-            pu8_ys        += 2;
-
-            pu8_u ++;
-            pu8_v ++;
-        } /* end of horizontal scanning */
-
-        pu8_y_data        += u32_stride2_Y;
-        pu8_u_data        += u32_stride_U;
-        pu8_v_data        += u32_stride_V;
-        pu8_rgbn_data    += u32_stride_2rgb;
-
-
-    } /* End of vertical scanning */
-
-    return M4VIFI_OK;
-}
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c
deleted file mode 100755
index 617e4ed..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- ******************************************************************************
- * @file     M4VIFI_ResizeRGB565toRGB565.c
- * @brief    Contain video library function
- * @note     This file has a Resize filter function
- *           Generic resizing of RGB565 (Planar) image
- ******************************************************************************
-*/
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-/**
- ***********************************************************************************************
- * M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
- *                                                                   M4VIFI_ImagePlane *pPlaneOut)
- * @brief   Resizes RGB565 Planar plane.
- * @param   pUserData: (IN) User Data
- * @param   pPlaneIn: (IN) Pointer to RGB565 (Planar) plane buffer
- * @param   pPlaneOut: (OUT) Pointer to RGB565 (Planar) plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
- ***********************************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
-                                                    M4VIFI_ImagePlane *pPlaneIn,
-                                                    M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt16   *pu16_data_in;
-    M4VIFI_UInt16   *pu16_data_out;
-    M4VIFI_UInt32   u32_width_in, u32_width_out, u32_height_in, u32_height_out;
-    M4VIFI_UInt32   u32_stride_in, u32_stride_out;
-    M4VIFI_UInt32   u32_x_inc, u32_y_inc;
-    M4VIFI_UInt32   u32_x_accum, u32_y_accum, u32_x_accum_start;
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_y_frac;
-    M4VIFI_UInt32   u32_x_frac;
-    M4VIFI_UInt32   u32_Rtemp_value,u32_Gtemp_value,u32_Btemp_value;
-    M4VIFI_UInt16   *pu16_src_top;
-    M4VIFI_UInt16   *pu16_src_bottom;
-    M4VIFI_UInt32   i32_b00, i32_g00, i32_r00;
-    M4VIFI_UInt32   i32_b01, i32_g01, i32_r01;
-    M4VIFI_UInt32   i32_b02, i32_g02, i32_r02;
-    M4VIFI_UInt32   i32_b03, i32_g03, i32_r03;
-    M4VIFI_UInt8    count_trans=0;
-
-    /* Check for the RGB width and height are even */
-    if ((IS_EVEN(pPlaneIn->u_height) == FALSE) ||
-        (IS_EVEN(pPlaneOut->u_height) == FALSE)) {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    if ((IS_EVEN(pPlaneIn->u_width) == FALSE) ||
-        (IS_EVEN(pPlaneOut->u_width) == FALSE)) {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-    /* Set the working pointers at the beginning of the input/output data field */
-    pu16_data_in    = (M4VIFI_UInt16*)(pPlaneIn->pac_data + pPlaneIn->u_topleft);
-    pu16_data_out   = (M4VIFI_UInt16*)(pPlaneOut->pac_data + pPlaneOut->u_topleft);
-
-    /* Get the memory jump corresponding to a row jump */
-    u32_stride_in   = pPlaneIn->u_stride;
-    u32_stride_out  = pPlaneOut->u_stride;
-
-    /* Set the bounds of the active image */
-    u32_width_in    = pPlaneIn->u_width;
-    u32_height_in   = pPlaneIn->u_height;
-
-    u32_width_out   = pPlaneOut->u_width;
-    u32_height_out  = pPlaneOut->u_height;
-
-    /* Compute horizontal ratio between src and destination width.*/
-    if (u32_width_out >= u32_width_in) {
-        u32_x_inc   = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
-    } else {
-        u32_x_inc   = (u32_width_in * MAX_SHORT) / (u32_width_out);
-    }
-
-    /* Compute vertical ratio between src and destination height.*/
-    if (u32_height_out >= u32_height_in) {
-        u32_y_inc   = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
-    } else {
-        u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
-    }
-
-    /*
-    Calculate initial accumulator value : u32_y_accum_start.
-    u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-    */
-    if (u32_y_inc >= MAX_SHORT) {
-        /*
-            Keep the fractional part, integer  part is coded
-            on the 16 high bits and the fractionnal on the 15 low bits
-        */
-        u32_y_accum = u32_y_inc & 0xffff;
-
-        if (!u32_y_accum)
-        {
-            u32_y_accum = MAX_SHORT;
-        }
-
-        u32_y_accum >>= 1;
-    } else {
-        u32_y_accum = 0;
-    }
-
-    /*
-        Calculate initial accumulator value : u32_x_accum_start.
-        u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-    */
-    if (u32_x_inc >= MAX_SHORT) {
-        u32_x_accum_start = u32_x_inc & 0xffff;
-
-        if (!u32_x_accum_start) {
-            u32_x_accum_start = MAX_SHORT;
-        }
-
-        u32_x_accum_start >>= 1;
-    } else {
-        u32_x_accum_start = 0;
-    }
-
-    u32_height = u32_height_out;
-
-    /*
-    Bilinear interpolation linearly interpolates along each row, and then uses that
-    result in a linear interpolation donw each column. Each estimated pixel in the
-    output image is a weighted combination of its four neighbours according to the formula:
-    F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+&,q+1)R(1-a)R(b-1)
-    with  R(x) = / x+1  -1 =< x =< 0 \ 1-x  0 =< x =< 1 and a (resp. b)weighting coefficient
-    is the distance from the nearest neighbor in the p (resp. q) direction
-    */
-
-    do { /* Scan all the row */
-
-        /* Vertical weight factor */
-        u32_y_frac = (u32_y_accum>>12)&15;
-
-        /* Reinit accumulator */
-        u32_x_accum = u32_x_accum_start;
-
-        u32_width = u32_width_out;
-
-        do { /* Scan along each row */
-            pu16_src_top = pu16_data_in + (u32_x_accum >> 16);
-            pu16_src_bottom = pu16_src_top + (u32_stride_in>>1);
-            u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
-
-            /* Weighted combination */
-            if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
-                GET_RGB565(i32_b00,i32_g00,i32_r00,(M4VIFI_UInt16)pu16_src_top[0]);
-                GET_RGB565(i32_b01,i32_g01,i32_r01,(M4VIFI_UInt16)pu16_src_top[1]);
-                GET_RGB565(i32_b02,i32_g02,i32_r02,(M4VIFI_UInt16)pu16_src_top[0]);
-                GET_RGB565(i32_b03,i32_g03,i32_r03,(M4VIFI_UInt16)pu16_src_top[1]);
-            } else {
-                GET_RGB565(i32_b00,i32_g00,i32_r00,(M4VIFI_UInt16)pu16_src_top[0]);
-                GET_RGB565(i32_b01,i32_g01,i32_r01,(M4VIFI_UInt16)pu16_src_top[1]);
-                GET_RGB565(i32_b02,i32_g02,i32_r02,(M4VIFI_UInt16)pu16_src_bottom[0]);
-                GET_RGB565(i32_b03,i32_g03,i32_r03,(M4VIFI_UInt16)pu16_src_bottom[1]);
-
-            }
-
-            /* Solution to avoid green effects due to transparency */
-            count_trans = 0;
-
-            /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
-            if (i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
-            {
-                i32_b00 = 31;
-                i32_r00 = 31;
-                count_trans++;
-            }
-            if (i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
-            {
-                i32_b01 = 31;
-                i32_r01 = 31;
-                count_trans++;
-            }
-            if (i32_b02 == 0 && i32_g02 == 63 && i32_r02 == 0)
-            {
-                i32_b02 = 31;
-                i32_r02 = 31;
-                count_trans++;
-            }
-            if (i32_b03 == 0 && i32_g03 == 63 && i32_r03 == 0)
-            {
-                i32_b03 = 31;
-                i32_r03 = 31;
-                count_trans++;
-            }
-
-            if (count_trans > 2) {
-                /* pixel is transparent */
-                u32_Rtemp_value = 0;
-                u32_Gtemp_value = 63;
-                u32_Btemp_value = 0;
-            } else {
-                u32_Rtemp_value = (M4VIFI_UInt8)(((i32_r00*(16-u32_x_frac) +
-                                 i32_r01*u32_x_frac)*(16-u32_y_frac) +
-                                (i32_r02*(16-u32_x_frac) +
-                                 i32_r03*u32_x_frac)*u32_y_frac )>>8);
-
-                u32_Gtemp_value = (M4VIFI_UInt8)(((i32_g00*(16-u32_x_frac) +
-                                 i32_g01*u32_x_frac)*(16-u32_y_frac) +
-                                (i32_g02*(16-u32_x_frac) +
-                                 i32_g03*u32_x_frac)*u32_y_frac )>>8);
-
-                u32_Btemp_value =  (M4VIFI_UInt8)(((i32_b00*(16-u32_x_frac) +
-                                 i32_b01*u32_x_frac)*(16-u32_y_frac) +
-                                (i32_b02*(16-u32_x_frac) +
-                                 i32_b03*u32_x_frac)*u32_y_frac )>>8);
-            }
-
-            *pu16_data_out++ = (M4VIFI_UInt16)( (((u32_Gtemp_value & 0x38) >> 3) | (u32_Btemp_value << 3)) |\
-                                ( (((u32_Gtemp_value & 0x7) << 5 ) | u32_Rtemp_value)<<8 ));
-
-            /* Update horizontal accumulator */
-            u32_x_accum += u32_x_inc;
-
-        } while(--u32_width);
-
-
-        /* Update vertical accumulator */
-        u32_y_accum += u32_y_inc;
-        if (u32_y_accum>>16) {
-            pu16_data_in = pu16_data_in + (u32_y_accum >> 16) * (u32_stride_in>>1);
-            u32_y_accum &= 0xffff;
-        }
-
-    } while(--u32_height);
-
-    return M4VIFI_OK;
-}
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c
deleted file mode 100755
index deb9d44..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file     M4VIFI_ResizeYUV420toYUV420.c
- * @brief    Contain video library function
- * @note     This file has a Resize filter function
- *           -# Generic resizing of YUV420 (Planar) image
- ******************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-/**
- ***********************************************************************************************
- * M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
- *                                                                  M4VIFI_ImagePlane *pPlaneOut)
- * @brief   Resizes YUV420 Planar plane.
- * @note    Basic structure of the function
- *          Loop on each row (step 2)
- *              Loop on each column (step 2)
- *                  Get four Y samples and 1 U & V sample
- *                  Resize the Y with corresponing U and V samples
- *                  Place the YUV in the ouput plane
- *              end loop column
- *          end loop row
- *          For resizing bilinear interpolation linearly interpolates along
- *          each row, and then uses that result in a linear interpolation down each column.
- *          Each estimated pixel in the output image is a weighted
- *          combination of its four neighbours. The ratio of compression
- *          or dilatation is estimated using input and output sizes.
- * @param   pUserData: (IN) User Data
- * @param   pPlaneIn: (IN) Pointer to YUV420 (Planar) plane buffer
- * @param   pPlaneOut: (OUT) Pointer to YUV420 (Planar) plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
- ***********************************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
-                                                                M4VIFI_ImagePlane *pPlaneIn,
-                                                                M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt8    *pu8_data_in;
-    M4VIFI_UInt8    *pu8_data_out;
-    M4VIFI_UInt32   u32_width_in, u32_width_out, u32_height_in, u32_height_out;
-    M4VIFI_UInt32   u32_stride_in, u32_stride_out;
-    M4VIFI_UInt32   u32_x_inc, u32_y_inc;
-    M4VIFI_UInt32   u32_x_accum, u32_y_accum, u32_x_accum_start;
-    M4VIFI_UInt32   u32_width, u32_height;
-    M4VIFI_UInt32   u32_y_frac;
-    M4VIFI_UInt32   u32_x_frac;
-    M4VIFI_UInt32   u32_Rtemp_value,u32_Gtemp_value,u32_Btemp_value;
-    M4VIFI_UInt8    *pu8_src_top;
-    M4VIFI_UInt8    *pu8_src_bottom;
-    M4VIFI_UInt32    i32_b00, i32_g00, i32_r00;
-    M4VIFI_UInt32    i32_b01, i32_g01, i32_r01;
-    M4VIFI_UInt32    i32_b02, i32_g02, i32_r02;
-    M4VIFI_UInt32    i32_b03, i32_g03, i32_r03;
-
-    /* Check for the YUV width and height are even */
-    if ((IS_EVEN(pPlaneIn->u_height) == FALSE)    ||
-        (IS_EVEN(pPlaneOut->u_height) == FALSE))
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-
-    if ((IS_EVEN(pPlaneIn->u_width) == FALSE) ||
-        (IS_EVEN(pPlaneOut->u_width) == FALSE))
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-    }
-
-
-        /* Set the working pointers at the beginning of the input/output data field */
-        pu8_data_in     = (M4VIFI_UInt8*)(pPlaneIn->pac_data + pPlaneIn->u_topleft);
-        pu8_data_out    = (M4VIFI_UInt8*)(pPlaneOut->pac_data + pPlaneOut->u_topleft);
-
-        /* Get the memory jump corresponding to a row jump */
-        u32_stride_in   = pPlaneIn->u_stride;
-        u32_stride_out  = pPlaneOut->u_stride;
-
-        /* Set the bounds of the active image */
-        u32_width_in    = pPlaneIn->u_width;
-        u32_height_in   = pPlaneIn->u_height;
-
-        u32_width_out   = pPlaneOut->u_width;
-        u32_height_out  = pPlaneOut->u_height;
-
-        /* Compute horizontal ratio between src and destination width.*/
-        if (u32_width_out >= u32_width_in)
-        {
-            u32_x_inc   = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
-        }
-        else
-        {
-            u32_x_inc   = (u32_width_in * MAX_SHORT) / (u32_width_out);
-        }
-
-        /* Compute vertical ratio between src and destination height.*/
-        if (u32_height_out >= u32_height_in)
-        {
-            u32_y_inc   = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
-        }
-        else
-        {
-            u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
-        }
-
-        /*
-        Calculate initial accumulator value : u32_y_accum_start.
-        u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-        */
-        if (u32_y_inc >= MAX_SHORT)
-        {
-            /*
-                Keep the fractionnal part, assimung that integer  part is coded
-                on the 16 high bits and the fractionnal on the 15 low bits
-            */
-            u32_y_accum = u32_y_inc & 0xffff;
-
-            if (!u32_y_accum)
-            {
-                u32_y_accum = MAX_SHORT;
-            }
-
-            u32_y_accum >>= 1;
-        }
-        else
-        {
-            u32_y_accum = 0;
-        }
-
-
-        /*
-            Calculate initial accumulator value : u32_x_accum_start.
-            u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-        */
-        if (u32_x_inc >= MAX_SHORT)
-        {
-            u32_x_accum_start = u32_x_inc & 0xffff;
-
-            if (!u32_x_accum_start)
-            {
-                u32_x_accum_start = MAX_SHORT;
-            }
-
-            u32_x_accum_start >>= 1;
-        }
-        else
-        {
-            u32_x_accum_start = 0;
-        }
-
-        u32_height = u32_height_out;
-
-        /*
-        Bilinear interpolation linearly interpolates along each row, and then uses that
-        result in a linear interpolation donw each column. Each estimated pixel in the
-        output image is a weighted combination of its four neighbours according to the formula:
-        F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+&,q+1)R(1-a)R(b-1)
-        with  R(x) = / x+1  -1 =< x =< 0 \ 1-x  0 =< x =< 1 and a (resp. b)weighting coefficient
-        is the distance from the nearest neighbor in the p (resp. q) direction
-        */
-
-        do { /* Scan all the row */
-
-            /* Vertical weight factor */
-            u32_y_frac = (u32_y_accum>>12)&15;
-
-            /* Reinit accumulator */
-            u32_x_accum = u32_x_accum_start;
-
-            u32_width = u32_width_out;
-
-            do { /* Scan along each row */
-                pu8_src_top = pu8_data_in + (u32_x_accum >> 16)*3;
-                pu8_src_bottom = pu8_src_top + (u32_stride_in);
-                u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
-
-                if ((u32_width == 1) && (u32_width_in == u32_width_out)) {
-                    /*
-                       When input height is equal to output height and input width
-                       equal to output width, replicate the corner pixels for
-                       interpolation
-                    */
-                    if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
-                        GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
-                        GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,0);
-                        GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_top,0);
-                        GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_top,0);
-                    }
-                    /*
-                       When input height is not equal to output height and
-                       input width equal to output width, replicate the
-                       column for interpolation
-                    */
-                    else {
-                        GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
-                        GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,0);
-                        GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_bottom,0);
-                        GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_bottom,0);
-                    }
-                } else {
-                    /*
-                       When input height is equal to output height and
-                       input width not equal to output width, replicate the
-                       row for interpolation
-                    */
-                    if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
-                        GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
-                        GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,3);
-                        GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_top,0);
-                        GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_top,3);
-                    } else {
-                        GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
-                        GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,3);
-                        GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_bottom,0);
-                        GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_bottom,3);
-                    }
-                }
-                u32_Rtemp_value = (M4VIFI_UInt8)(((i32_r00*(16-u32_x_frac) +
-                                 i32_r01*u32_x_frac)*(16-u32_y_frac) +
-                                (i32_r02*(16-u32_x_frac) +
-                                 i32_r03*u32_x_frac)*u32_y_frac )>>8);
-
-                u32_Gtemp_value = (M4VIFI_UInt8)(((i32_g00*(16-u32_x_frac) +
-                                 i32_g01*u32_x_frac)*(16-u32_y_frac) +
-                                (i32_g02*(16-u32_x_frac) +
-                                 i32_g03*u32_x_frac)*u32_y_frac )>>8);
-
-                u32_Btemp_value =  (M4VIFI_UInt8)(((i32_b00*(16-u32_x_frac) +
-                                 i32_b01*u32_x_frac)*(16-u32_y_frac) +
-                                (i32_b02*(16-u32_x_frac) +
-                                 i32_b03*u32_x_frac)*u32_y_frac )>>8);
-
-                *pu8_data_out++ = u32_Btemp_value ;
-                *pu8_data_out++ = u32_Gtemp_value ;
-                *pu8_data_out++ = u32_Rtemp_value ;
-
-                /* Update horizontal accumulator */
-                u32_x_accum += u32_x_inc;
-
-            } while(--u32_width);
-
-            //pu16_data_out = pu16_data_out + (u32_stride_out>>1) - (u32_width_out);
-
-            /* Update vertical accumulator */
-            u32_y_accum += u32_y_inc;
-            if (u32_y_accum>>16)
-            {
-                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * (u32_stride_in) ;
-                u32_y_accum &= 0xffff;
-            }
-        } while(--u32_height);
-
-    return M4VIFI_OK;
-}
-/* End of file M4VIFI_ResizeRGB565toRGB565.c */
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c
deleted file mode 100755
index 0042e80..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file     M4VIFI_ResizeYUV420toBGR565.c
- * @brief    Contain video library function
- * @note     This file has a Combo filter function
- *           -# Resizes YUV420 and converts to RGR565 with rotation
- ******************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-/**
- *********************************************************************************************
- * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void *pContext, M4VIFI_ImagePlane *pPlaneIn,
- *                                                                  M4VIFI_ImagePlane *pPlaneOut)
- * @brief   Resize YUV420 plane and converts to BGR565 with +90 rotation.
- * @note    Basic sturture of the function
- *          Loop on each row (step 2)
- *              Loop on each column (step 2)
- *                  Get four Y samples and 1 u & V sample
- *                  Resize the Y with corresponing U and V samples
- *                  Compute the four corresponding R G B values
- *                  Place the R G B in the ouput plane in rotated fashion
- *              end loop column
- *          end loop row
- *          For resizing bilinear interpolation linearly interpolates along
- *          each row, and then uses that result in a linear interpolation down each column.
- *          Each estimated pixel in the output image is a weighted
- *          combination of its four neighbours. The ratio of compression
- *          or dilatation is estimated using input and output sizes.
- * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
- * @param   pContext: (IN) Context Pointer
- * @param   pPlaneOut: (OUT) Pointer to BGR565 Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
- *********************************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_ResizeBilinearYUV420toBGR565(void* pContext,
-                                                                 M4VIFI_ImagePlane *pPlaneIn,
-                                                                 M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt8    *pu8_data_in[PLANES], *pu8_data_in1[PLANES],*pu8_data_out;
-    M4VIFI_UInt32   *pu32_rgb_data_current, *pu32_rgb_data_next, *pu32_rgb_data_start;
-
-    M4VIFI_UInt32   u32_width_in[PLANES], u32_width_out, u32_height_in[PLANES], u32_height_out;
-    M4VIFI_UInt32   u32_stride_in[PLANES];
-    M4VIFI_UInt32   u32_stride_out, u32_stride2_out, u32_width2_RGB, u32_height2_RGB;
-    M4VIFI_UInt32   u32_x_inc[PLANES], u32_y_inc[PLANES];
-    M4VIFI_UInt32   u32_x_accum_Y, u32_x_accum_U, u32_x_accum_start;
-    M4VIFI_UInt32   u32_y_accum_Y, u32_y_accum_U;
-    M4VIFI_UInt32   u32_x_frac_Y, u32_x_frac_U, u32_y_frac_Y,u32_y_frac_U;
-    M4VIFI_Int32    U_32, V_32, Y_32, Yval_32;
-    M4VIFI_UInt8    u8_Red, u8_Green, u8_Blue;
-    M4VIFI_UInt32   u32_row, u32_col;
-
-    M4VIFI_UInt32   u32_plane;
-    M4VIFI_UInt32   u32_rgb_temp1, u32_rgb_temp2;
-    M4VIFI_UInt32   u32_rgb_temp3,u32_rgb_temp4;
-    M4VIFI_UInt32   u32_check_size;
-
-    M4VIFI_UInt8    *pu8_src_top_Y,*pu8_src_top_U,*pu8_src_top_V ;
-    M4VIFI_UInt8    *pu8_src_bottom_Y, *pu8_src_bottom_U, *pu8_src_bottom_V;
-
-    /* Check for the YUV width and height are even */
-    u32_check_size = IS_EVEN(pPlaneIn[0].u_height);
-    if( u32_check_size == FALSE )
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-    u32_check_size = IS_EVEN(pPlaneIn[0].u_width);
-    if (u32_check_size == FALSE )
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-
-    }
-    /* Make the ouput width and height as even */
-    pPlaneOut->u_height = pPlaneOut->u_height & 0xFFFFFFFE;
-    pPlaneOut->u_width = pPlaneOut->u_width & 0xFFFFFFFE;
-    pPlaneOut->u_stride = pPlaneOut->u_stride & 0xFFFFFFFC;
-
-    /* Assignment of output pointer */
-    pu8_data_out    = pPlaneOut->pac_data + pPlaneOut->u_topleft;
-    /* Assignment of output width(rotated) */
-    u32_width_out   = pPlaneOut->u_width;
-    /* Assignment of output height(rotated) */
-    u32_height_out  = pPlaneOut->u_height;
-
-    u32_width2_RGB  = pPlaneOut->u_width >> 1;
-    u32_height2_RGB = pPlaneOut->u_height >> 1;
-
-    u32_stride_out = pPlaneOut->u_stride >> 1;
-    u32_stride2_out = pPlaneOut->u_stride >> 2;
-
-    for(u32_plane = 0; u32_plane < PLANES; u32_plane++)
-    {
-        /* Set the working pointers at the beginning of the input/output data field */
-        pu8_data_in[u32_plane] = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
-
-        /* Get the memory jump corresponding to a row jump */
-        u32_stride_in[u32_plane] = pPlaneIn[u32_plane].u_stride;
-
-        /* Set the bounds of the active image */
-        u32_width_in[u32_plane] = pPlaneIn[u32_plane].u_width;
-        u32_height_in[u32_plane] = pPlaneIn[u32_plane].u_height;
-    }
-    /* Compute horizontal ratio between src and destination width for Y Plane. */
-    if (u32_width_out >= u32_width_in[YPlane])
-    {
-        u32_x_inc[YPlane]   = ((u32_width_in[YPlane]-1) * MAX_SHORT) / (u32_width_out-1);
-    }
-    else
-    {
-        u32_x_inc[YPlane]   = (u32_width_in[YPlane] * MAX_SHORT) / (u32_width_out);
-    }
-
-    /* Compute vertical ratio between src and destination height for Y Plane.*/
-    if (u32_height_out >= u32_height_in[YPlane])
-    {
-        u32_y_inc[YPlane]   = ((u32_height_in[YPlane]-1) * MAX_SHORT) / (u32_height_out-1);
-    }
-    else
-    {
-        u32_y_inc[YPlane] = (u32_height_in[YPlane] * MAX_SHORT) / (u32_height_out);
-    }
-
-    /* Compute horizontal ratio between src and destination width for U and V Planes. */
-    if (u32_width2_RGB >= u32_width_in[UPlane])
-    {
-        u32_x_inc[UPlane]   = ((u32_width_in[UPlane]-1) * MAX_SHORT) / (u32_width2_RGB-1);
-    }
-    else
-    {
-        u32_x_inc[UPlane]   = (u32_width_in[UPlane] * MAX_SHORT) / (u32_width2_RGB);
-    }
-
-    /* Compute vertical ratio between src and destination height for U and V Planes. */
-
-    if (u32_height2_RGB >= u32_height_in[UPlane])
-    {
-        u32_y_inc[UPlane]   = ((u32_height_in[UPlane]-1) * MAX_SHORT) / (u32_height2_RGB-1);
-    }
-    else
-    {
-        u32_y_inc[UPlane]  = (u32_height_in[UPlane] * MAX_SHORT) / (u32_height2_RGB);
-    }
-
-    u32_y_inc[VPlane] = u32_y_inc[UPlane];
-    u32_x_inc[VPlane] = u32_x_inc[UPlane];
-
-    /*
-        Calculate initial accumulator value : u32_y_accum_start.
-        u32_y_accum_start is coded on 15 bits,and represents a value between 0 and 0.5
-    */
-    if (u32_y_inc[YPlane] > MAX_SHORT)
-    {
-        /*
-            Keep the fractionnal part, assimung that integer  part is coded on the 16 high bits,
-            and the fractionnal on the 15 low bits
-        */
-        u32_y_accum_Y = u32_y_inc[YPlane] & 0xffff;
-        u32_y_accum_U = u32_y_inc[UPlane] & 0xffff;
-
-        if (!u32_y_accum_Y)
-        {
-            u32_y_accum_Y = MAX_SHORT;
-            u32_y_accum_U = MAX_SHORT;
-        }
-        u32_y_accum_Y >>= 1;
-        u32_y_accum_U >>= 1;
-    }
-    else
-    {
-        u32_y_accum_Y = 0;
-        u32_y_accum_U = 0;
-
-    }
-
-    /*
-        Calculate initial accumulator value : u32_x_accum_start.
-        u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-    */
-    if (u32_x_inc[YPlane] > MAX_SHORT)
-    {
-        u32_x_accum_start = u32_x_inc[YPlane] & 0xffff;
-
-        if (!u32_x_accum_start)
-        {
-            u32_x_accum_start = MAX_SHORT;
-        }
-
-        u32_x_accum_start >>= 1;
-    }
-    else
-    {
-        u32_x_accum_start = 0;
-    }
-
-    pu32_rgb_data_start = (M4VIFI_UInt32*)pu8_data_out;
-
-    /*
-        Bilinear interpolation linearly interpolates along each row, and then uses that
-        result in a linear interpolation donw each column. Each estimated pixel in the
-        output image is a weighted combination of its four neighbours according to the formula :
-        F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+&,q+1)R(1-a)R(b-1)
-        with  R(x) = / x+1  -1 =< x =< 0 \ 1-x  0 =< x =< 1 and a (resp. b) weighting coefficient
-        is the distance from the nearest neighbor in the p (resp. q) direction
-    */
-    for (u32_row = u32_height_out; u32_row != 0; u32_row -= 2)
-    {
-        u32_x_accum_Y = u32_x_accum_start;
-        u32_x_accum_U = u32_x_accum_start;
-
-        /* Vertical weight factor */
-        u32_y_frac_Y = (u32_y_accum_Y >> 12) & 15;
-        u32_y_frac_U = (u32_y_accum_U >> 12) & 15;
-
-        /* RGB current line position pointer */
-        pu32_rgb_data_current = pu32_rgb_data_start ;
-
-        /* RGB next line position pointer */
-        pu32_rgb_data_next    = pu32_rgb_data_current + (u32_stride2_out);
-
-        /* Y Plane next row pointer */
-        pu8_data_in1[YPlane] = pu8_data_in[YPlane];
-
-        u32_rgb_temp3 = u32_y_accum_Y + (u32_y_inc[YPlane]);
-        if (u32_rgb_temp3 >> 16)
-        {
-            pu8_data_in1[YPlane] =  pu8_data_in[YPlane] +
-                                                (u32_rgb_temp3 >> 16) * (u32_stride_in[YPlane]);
-            u32_rgb_temp3 &= 0xffff;
-        }
-        u32_rgb_temp4 = (u32_rgb_temp3 >> 12) & 15;
-
-        for (u32_col = u32_width_out; u32_col != 0; u32_col -= 2)
-        {
-
-            /* Input Y plane elements */
-            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16);
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Input U Plane elements */
-            pu8_src_top_U = pu8_data_in[UPlane] + (u32_x_accum_U >> 16);
-            pu8_src_bottom_U = pu8_src_top_U + u32_stride_in[UPlane];
-
-            pu8_src_top_V = pu8_data_in[VPlane] + (u32_x_accum_U >> 16);
-            pu8_src_bottom_V = pu8_src_top_V + u32_stride_in[VPlane];
-
-            /* Horizontal weight factor for Y plane */
-            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
-            /* Horizontal weight factor for U and V planes */
-            u32_x_frac_U = (u32_x_accum_U >> 12)&15;
-
-            /* Weighted combination */
-            U_32 = (((pu8_src_top_U[0]*(16-u32_x_frac_U) + pu8_src_top_U[1]*u32_x_frac_U)
-                    *(16-u32_y_frac_U) + (pu8_src_bottom_U[0]*(16-u32_x_frac_U)
-                    + pu8_src_bottom_U[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
-
-            V_32 = (((pu8_src_top_V[0]*(16-u32_x_frac_U) + pu8_src_top_V[1]*u32_x_frac_U)
-                    *(16-u32_y_frac_U)+ (pu8_src_bottom_V[0]*(16-u32_x_frac_U)
-                    + pu8_src_bottom_V[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
-
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
-
-            u32_x_accum_U += (u32_x_inc[UPlane]);
-
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32 = Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32 = Y_32*0x2568;
-            #endif /* __RGB_V1__v */
-
-                    DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    u32_rgb_temp1 = PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    u32_rgb_temp1 = PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-
-            pu8_src_top_Y = pu8_data_in1[YPlane]+(u32_x_accum_Y >> 16);
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Weighted combination */
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 ) >> 8);
-
-            u32_x_accum_Y += u32_x_inc[YPlane];
-
-            /* Horizontal weight factor */
-            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
-
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32 = Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32 = Y_32*0x2568;
-            #endif  /* __RGB_V1__v */
-
-            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    u32_rgb_temp2 = PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    u32_rgb_temp2 = PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-
-            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16) ;
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Weighted combination */
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
-
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32 = Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32 = Y_32*0x2568;
-            #endif  /* __RGB_V1__v */
-
-            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
-                                                        PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
-                                                        PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-
-            pu8_src_top_Y = pu8_data_in1[YPlane]+ (u32_x_accum_Y >> 16);
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Weighted combination */
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 )>>8);
-
-            u32_x_accum_Y += u32_x_inc[YPlane];
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32=Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32=Y_32*0x2568;
-            #endif  /* __RGB_V1__v */
-
-            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
-                                                        PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
-                                                        PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-        }   /* End of horizontal scanning */
-
-        u32_y_accum_Y  =  u32_rgb_temp3 + (u32_y_inc[YPlane]);
-        u32_y_accum_U += (u32_y_inc[UPlane]);
-
-        /* Y plane row update */
-        if (u32_y_accum_Y >> 16)
-        {
-            pu8_data_in[YPlane] =  pu8_data_in1[YPlane] +
-                                                ((u32_y_accum_Y >> 16) * (u32_stride_in[YPlane]));
-            u32_y_accum_Y &= 0xffff;
-        }
-        else
-        {
-            pu8_data_in[YPlane] = pu8_data_in1[YPlane];
-        }
-        /* U and V planes row update */
-        if (u32_y_accum_U >> 16)
-        {
-            pu8_data_in[UPlane] =  pu8_data_in[UPlane] +
-                                                (u32_y_accum_U >> 16) * (u32_stride_in[UPlane]);
-            pu8_data_in[VPlane] =  pu8_data_in[VPlane] +
-                                                (u32_y_accum_U >> 16) * (u32_stride_in[VPlane]);
-            u32_y_accum_U &= 0xffff;
-        }
-        /* BGR pointer Update */
-        pu32_rgb_data_start += u32_stride_out;
-
-    }   /* End of vertical scanning */
-    return M4VIFI_OK;
-}
-
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c
deleted file mode 100755
index eda9d07..0000000
--- a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- ******************************************************************************
- * @file     M4VIFI_ResizeYUV420toRGB565RotatedRight.c
- * @brief    Contain video library function
- * @note     This file has a Combo filter function
- *           -# Resizes YUV420 and converts to RGR565 with rotation
- * @date
- *           - 2004/08/11: Creation
- ******************************************************************************
-*/
-
-/* Prototypes of functions, and type definitions */
-#include    "M4VIFI_FiltersAPI.h"
-/* Macro definitions */
-#include    "M4VIFI_Defines.h"
-/* Clip table declaration */
-#include    "M4VIFI_Clip.h"
-
-/**
- ********************************************************************************************
- * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(void *pContext,
- *                                                              M4VIFI_ImagePlane *pPlaneIn,
- *                                                              M4VIFI_ImagePlane *pPlaneOut)
- * @brief   Resize YUV420 plane and converts to RGB565 with +90 rotation.
- * @note    Basic sturture of the function
- *          Loop on each row (step 2)
- *              Loop on each column (step 2)
- *                  Get four Y samples and 1 u & V sample
- *                  Resize the Y with corresponing U and V samples
- *                  Compute the four corresponding R G B values
- *                  Place the R G B in the ouput plane in rotated fashion
- *              end loop column
- *          end loop row
- *          For resizing bilinear interpolation linearly interpolates along
- *          each row, and then uses that result in a linear interpolation down each column.
- *          Each estimated pixel in the output image is a weighted
- *          combination of its four neighbours. The ratio of compression
- *          or dilatation is estimated using input and output sizes.
- * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
- * @param   pContext: (IN) Context Pointer
- * @param   pPlaneOut: (OUT) Pointer to BGR565 Plane
- * @return  M4VIFI_OK: there is no error
- * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
- * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
- ********************************************************************************************
-*/
-M4VIFI_UInt8    M4VIFI_ResizeBilinearYUV420toRGB565(void* pContext,
-                                                    M4VIFI_ImagePlane *pPlaneIn,
-                                                    M4VIFI_ImagePlane *pPlaneOut)
-{
-    M4VIFI_UInt8    *pu8_data_in[PLANES], *pu8_data_in1[PLANES],*pu8_data_out;
-    M4VIFI_UInt32   *pu32_rgb_data_current, *pu32_rgb_data_next, *pu32_rgb_data_start;
-
-    M4VIFI_UInt32   u32_width_in[PLANES], u32_width_out, u32_height_in[PLANES], u32_height_out;
-    M4VIFI_UInt32   u32_stride_in[PLANES];
-    M4VIFI_UInt32   u32_stride_out, u32_stride2_out, u32_width2_RGB, u32_height2_RGB;
-    M4VIFI_UInt32   u32_x_inc[PLANES], u32_y_inc[PLANES];
-    M4VIFI_UInt32   u32_x_accum_Y, u32_x_accum_U, u32_x_accum_start;
-    M4VIFI_UInt32   u32_y_accum_Y, u32_y_accum_U;
-    M4VIFI_UInt32   u32_x_frac_Y, u32_x_frac_U, u32_y_frac_Y,u32_y_frac_U;
-    M4VIFI_Int32    U_32, V_32, Y_32, Yval_32;
-    M4VIFI_UInt8    u8_Red, u8_Green, u8_Blue;
-    M4VIFI_UInt32   u32_row, u32_col;
-
-    M4VIFI_UInt32   u32_plane;
-    M4VIFI_UInt32   u32_rgb_temp1, u32_rgb_temp2;
-    M4VIFI_UInt32   u32_rgb_temp3,u32_rgb_temp4;
-    M4VIFI_UInt32   u32_check_size;
-
-    M4VIFI_UInt8    *pu8_src_top_Y,*pu8_src_top_U,*pu8_src_top_V ;
-    M4VIFI_UInt8    *pu8_src_bottom_Y, *pu8_src_bottom_U, *pu8_src_bottom_V;
-
-    /* Check for the  width and height are even */
-    u32_check_size = IS_EVEN(pPlaneIn[0].u_height);
-    if( u32_check_size == FALSE )
-    {
-        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
-    }
-    u32_check_size = IS_EVEN(pPlaneIn[0].u_width);
-    if (u32_check_size == FALSE )
-    {
-        return M4VIFI_ILLEGAL_FRAME_WIDTH;
-
-    }
-    /* Make the ouput width and height as even */
-    pPlaneOut->u_height = pPlaneOut->u_height & 0xFFFFFFFE;
-    pPlaneOut->u_width = pPlaneOut->u_width & 0xFFFFFFFE;
-    pPlaneOut->u_stride = pPlaneOut->u_stride & 0xFFFFFFFC;
-
-    /* Assignment of output pointer */
-    pu8_data_out    = pPlaneOut->pac_data + pPlaneOut->u_topleft;
-    /* Assignment of output width(rotated) */
-    u32_width_out   = pPlaneOut->u_width;
-    /* Assignment of output height(rotated) */
-    u32_height_out  = pPlaneOut->u_height;
-
-    /* Set the bounds of the active image */
-    u32_width2_RGB  = pPlaneOut->u_width >> 1;
-    u32_height2_RGB = pPlaneOut->u_height >> 1;
-    /* Get the memory jump corresponding to a row jump */
-    u32_stride_out = pPlaneOut->u_stride >> 1;
-    u32_stride2_out = pPlaneOut->u_stride >> 2;
-
-    for(u32_plane = 0; u32_plane < PLANES; u32_plane++)
-    {
-        /* Set the working pointers at the beginning of the input/output data field */
-        pu8_data_in[u32_plane] = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
-
-        /* Get the memory jump corresponding to a row jump */
-        u32_stride_in[u32_plane] = pPlaneIn[u32_plane].u_stride;
-
-        /* Set the bounds of the active image */
-        u32_width_in[u32_plane] = pPlaneIn[u32_plane].u_width;
-        u32_height_in[u32_plane] = pPlaneIn[u32_plane].u_height;
-    }
-    /* Compute horizontal ratio between src and destination width for Y Plane.*/
-    if (u32_width_out >= u32_width_in[YPlane])
-    {
-        u32_x_inc[YPlane]   = ((u32_width_in[YPlane]-1) * MAX_SHORT) / (u32_width_out-1);
-    }
-    else
-    {
-        u32_x_inc[YPlane]   = (u32_width_in[YPlane] * MAX_SHORT) / (u32_width_out);
-    }
-
-    /* Compute vertical ratio between src and destination height for Y Plane.*/
-    if (u32_height_out >= u32_height_in[YPlane])
-    {
-        u32_y_inc[YPlane]   = ((u32_height_in[YPlane]-1) * MAX_SHORT) / (u32_height_out-1);
-    }
-    else
-    {
-        u32_y_inc[YPlane] = (u32_height_in[YPlane] * MAX_SHORT) / (u32_height_out);
-    }
-
-    /* Compute horizontal ratio between src and destination width for U and V Planes.*/
-    if (u32_width2_RGB >= u32_width_in[UPlane])
-    {
-        u32_x_inc[UPlane]   = ((u32_width_in[UPlane]-1) * MAX_SHORT) / (u32_width2_RGB-1);
-    }
-    else
-    {
-        u32_x_inc[UPlane]   = (u32_width_in[UPlane] * MAX_SHORT) / (u32_width2_RGB);
-    }
-
-    /* Compute vertical ratio between src and destination height for U and V Planes.*/
-
-    if (u32_height2_RGB >= u32_height_in[UPlane])
-    {
-        u32_y_inc[UPlane]   = ((u32_height_in[UPlane]-1) * MAX_SHORT) / (u32_height2_RGB-1);
-    }
-    else
-    {
-        u32_y_inc[UPlane]  = (u32_height_in[UPlane] * MAX_SHORT) / (u32_height2_RGB);
-    }
-
-    u32_y_inc[VPlane] = u32_y_inc[UPlane];
-    u32_x_inc[VPlane] = u32_x_inc[UPlane];
-
-    /*
-    Calculate initial accumulator value : u32_y_accum_start.
-    u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-    */
-    if (u32_y_inc[YPlane] > MAX_SHORT)
-    {
-        /*
-        Keep the fractionnal part, assimung that integer  part is coded on the 16 high bits,
-        and the fractionnal on the 15 low bits
-        */
-        u32_y_accum_Y = u32_y_inc[YPlane] & 0xffff;
-        u32_y_accum_U = u32_y_inc[UPlane] & 0xffff;
-
-        if (!u32_y_accum_Y)
-        {
-            u32_y_accum_Y = MAX_SHORT;
-            u32_y_accum_U = MAX_SHORT;
-        }
-        u32_y_accum_Y >>= 1;
-        u32_y_accum_U >>= 1;
-    }
-    else
-    {
-        u32_y_accum_Y = 0;
-        u32_y_accum_U = 0;
-
-    }
-
-    /*
-    Calculate initial accumulator value : u32_x_accum_start.
-    u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
-    */
-    if (u32_x_inc[YPlane] > MAX_SHORT)
-    {
-        u32_x_accum_start = u32_x_inc[YPlane] & 0xffff;
-
-        if (!u32_x_accum_start)
-        {
-            u32_x_accum_start = MAX_SHORT;
-        }
-
-        u32_x_accum_start >>= 1;
-    }
-    else
-    {
-        u32_x_accum_start = 0;
-    }
-    /* Intialise the RGB pointer */
-    pu32_rgb_data_start = (M4VIFI_UInt32*)pu8_data_out;
-
-    /*
-        Bilinear interpolation linearly interpolates along each row, and then uses that
-        result in a linear interpolation donw each column. Each estimated pixel in the
-        output image is a weighted combination of its four neighbours according to the formula :
-        F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+&,q+1)R(1-a)R(b-1)
-        with  R(x) = / x+1  -1 =< x =< 0 \ 1-x  0 =< x =< 1 and a (resp. b) weighting coefficient
-        is the distance from the nearest neighbor in the p (resp. q) direction
-    */
-    for (u32_row = u32_height_out; u32_row != 0; u32_row -= 2)
-    {
-        u32_x_accum_Y = u32_x_accum_start;
-        u32_x_accum_U = u32_x_accum_start;
-
-        /* Vertical weight factor */
-        u32_y_frac_Y = (u32_y_accum_Y >> 12) & 15;
-        u32_y_frac_U = (u32_y_accum_U >> 12) & 15;
-
-        /* RGB current line Position Pointer */
-        pu32_rgb_data_current = pu32_rgb_data_start ;
-
-        /* RGB next line position pointer */
-        pu32_rgb_data_next    = pu32_rgb_data_current + (u32_stride2_out);
-
-        /* Y Plane next row pointer */
-        pu8_data_in1[YPlane] = pu8_data_in[YPlane];
-
-        u32_rgb_temp3 = u32_y_accum_Y + (u32_y_inc[YPlane]);
-        if (u32_rgb_temp3 >> 16)
-        {
-            pu8_data_in1[YPlane] =  pu8_data_in[YPlane] +
-                                                (u32_rgb_temp3 >> 16) * (u32_stride_in[YPlane]);
-            u32_rgb_temp3 &= 0xffff;
-        }
-        u32_rgb_temp4 = (u32_rgb_temp3 >> 12) & 15;
-
-        for (u32_col = u32_width_out; u32_col != 0; u32_col -= 2)
-        {
-
-            /* Input Y plane elements */
-            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16);
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Input U Plane elements */
-            pu8_src_top_U = pu8_data_in[UPlane] + (u32_x_accum_U >> 16);
-            pu8_src_bottom_U = pu8_src_top_U + u32_stride_in[UPlane];
-
-            pu8_src_top_V = pu8_data_in[VPlane] + (u32_x_accum_U >> 16);
-            pu8_src_bottom_V = pu8_src_top_V + u32_stride_in[VPlane];
-
-            /* Horizontal weight factor for Y Plane */
-            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
-            /* Horizontal weight factor for U and V Planes */
-            u32_x_frac_U = (u32_x_accum_U >> 12)&15;
-
-            /* Weighted combination */
-            U_32 = (((pu8_src_top_U[0]*(16-u32_x_frac_U) + pu8_src_top_U[1]*u32_x_frac_U)
-                    *(16-u32_y_frac_U) + (pu8_src_bottom_U[0]*(16-u32_x_frac_U)
-                    + pu8_src_bottom_U[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
-
-            V_32 = (((pu8_src_top_V[0]*(16-u32_x_frac_U) + pu8_src_top_V[1]*u32_x_frac_U)
-                    *(16-u32_y_frac_U) + (pu8_src_bottom_V[0]*(16-u32_x_frac_U)
-                    + pu8_src_bottom_V[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
-
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
-
-            u32_x_accum_U += (u32_x_inc[UPlane]);
-
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32 = Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32 = Y_32*0x2568;
-            #endif /* __RGB_V1__v */
-
-                    DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    u32_rgb_temp1 = PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    u32_rgb_temp1 = PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-
-            pu8_src_top_Y = pu8_data_in1[YPlane]+(u32_x_accum_Y >> 16);
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Weighted combination */
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 ) >> 8);
-
-            u32_x_accum_Y += u32_x_inc[YPlane];
-            /* Horizontal weight factor */
-            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32 = Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32 = Y_32*0x2568;
-            #endif  /* __RGB_V1__v */
-
-            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    u32_rgb_temp2 = PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    u32_rgb_temp2 = PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-
-            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16) ;
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Weighted combination */
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32 = Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32 = Y_32*0x2568;
-            #endif  /* __RGB_V1__v */
-
-            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
-                                                        PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
-                                                        PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-
-            pu8_src_top_Y = pu8_data_in1[YPlane]+ (u32_x_accum_Y >> 16);
-            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
-
-            /* Weighted combination */
-            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
-                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
-                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 )>>8);
-
-            u32_x_accum_Y += u32_x_inc[YPlane];
-            /* YUV to RGB */
-            #ifdef __RGB_V1__
-                    Yval_32=Y_32*37;
-            #else   /* __RGB_V1__v */
-                    Yval_32=Y_32*0x2568;
-            #endif  /* __RGB_V1__v */
-
-            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
-
-            /* Pack 8 bit R,G,B to RGB565 */
-            #ifdef  LITTLE_ENDIAN
-                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
-                                                        PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
-            #else   /* LITTLE_ENDIAN */
-                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
-                                                        PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
-            #endif  /* LITTLE_ENDIAN */
-
-        }   /* End of horizontal scanning */
-
-        u32_y_accum_Y  =  u32_rgb_temp3 + (u32_y_inc[YPlane]);
-        u32_y_accum_U += (u32_y_inc[UPlane]);
-
-        /* Y plane row update */
-        if (u32_y_accum_Y >> 16)
-        {
-            pu8_data_in[YPlane] =  pu8_data_in1[YPlane] +
-                                                ((u32_y_accum_Y >> 16) * (u32_stride_in[YPlane]));
-            u32_y_accum_Y &= 0xffff;
-        }
-        else
-        {
-            pu8_data_in[YPlane] = pu8_data_in1[YPlane];
-        }
-        /* U and V planes row update */
-        if (u32_y_accum_U >> 16)
-        {
-            pu8_data_in[UPlane] =  pu8_data_in[UPlane] +
-                                                (u32_y_accum_U >> 16) * (u32_stride_in[UPlane]);
-            pu8_data_in[VPlane] =  pu8_data_in[VPlane] +
-                                                (u32_y_accum_U >> 16) * (u32_stride_in[VPlane]);
-            u32_y_accum_U &= 0xffff;
-        }
-
-        pu32_rgb_data_start += u32_stride_out;
-
-    }   /* End of vertical scanning */
-    return M4VIFI_OK;
-}
-
diff --git a/libvideoeditor/vss/video_filters/src/MODULE_LICENSE_APACHE2 b/libvideoeditor/vss/video_filters/src/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/libvideoeditor/vss/video_filters/src/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/libvideoeditor/vss/video_filters/src/NOTICE b/libvideoeditor/vss/video_filters/src/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/libvideoeditor/vss/video_filters/src/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 1663d47..661fde9 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -30,25 +30,13 @@
 
 #define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
 
+// subset of possible audio_channel_mask_t values, and AUDIO_CHANNEL_OUT_* renamed to CHANNEL_MASK_*
 typedef enum {
-    CHANNEL_MASK_SURROUND = AUDIO_CHANNEL_OUT_SURROUND,
-    CHANNEL_MASK_QUAD_BACK = AUDIO_CHANNEL_OUT_QUAD,
-    // like AUDIO_CHANNEL_OUT_QUAD with *_SIDE_* instead of *_BACK_*, same channel order
-    CHANNEL_MASK_QUAD_SIDE =
-            AUDIO_CHANNEL_OUT_FRONT_LEFT |
-            AUDIO_CHANNEL_OUT_FRONT_RIGHT |
-            AUDIO_CHANNEL_OUT_SIDE_LEFT |
-            AUDIO_CHANNEL_OUT_SIDE_RIGHT,
-    CHANNEL_MASK_5POINT1_BACK = AUDIO_CHANNEL_OUT_5POINT1,
-    // like AUDIO_CHANNEL_OUT_5POINT1 with *_SIDE_* instead of *_BACK_*, same channel order
-    CHANNEL_MASK_5POINT1_SIDE =
-            AUDIO_CHANNEL_OUT_FRONT_LEFT |
-            AUDIO_CHANNEL_OUT_FRONT_RIGHT |
-            AUDIO_CHANNEL_OUT_FRONT_CENTER |
-            AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
-            AUDIO_CHANNEL_OUT_SIDE_LEFT |
-            AUDIO_CHANNEL_OUT_SIDE_RIGHT,
-    CHANNEL_MASK_7POINT1_SIDE_BACK = AUDIO_CHANNEL_OUT_7POINT1,
+    CHANNEL_MASK_QUAD_BACK = AUDIO_CHANNEL_OUT_QUAD_BACK,
+    CHANNEL_MASK_QUAD_SIDE = AUDIO_CHANNEL_OUT_QUAD_SIDE,
+    CHANNEL_MASK_5POINT1_BACK = AUDIO_CHANNEL_OUT_5POINT1_BACK,
+    CHANNEL_MASK_5POINT1_SIDE = AUDIO_CHANNEL_OUT_5POINT1_SIDE,
+    CHANNEL_MASK_7POINT1 = AUDIO_CHANNEL_OUT_7POINT1,
 } downmix_input_channel_mask_t;
 
 // effect_handle_t interface implementation for downmix effect
@@ -340,14 +328,11 @@
         case CHANNEL_MASK_QUAD_SIDE:
             Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
             break;
-        case CHANNEL_MASK_SURROUND:
-            Downmix_foldFromSurround(pSrc, pDst, numFrames, accumulate);
-            break;
         case CHANNEL_MASK_5POINT1_BACK:
         case CHANNEL_MASK_5POINT1_SIDE:
             Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
             break;
-        case CHANNEL_MASK_7POINT1_SIDE_BACK:
+        case CHANNEL_MASK_7POINT1:
             Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
             break;
         default:
@@ -828,65 +813,6 @@
 
 
 /*----------------------------------------------------------------------------
- * Downmix_foldFromSurround()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a "surround sound" (mono rear) signal to stereo
- *
- * Inputs:
- *  pSrc       surround signal to downmix
- *  numFrames  the number of surround frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFromSurround(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-    int32_t lt, rt, centerPlusRearContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is RC
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusRearContrib = FC(-3dB) + RC(-3dB)
-            centerPlusRearContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
-            // FL + centerPlusRearContrib
-            lt = (pSrc[0] << 12) + centerPlusRearContrib;
-            // FR + centerPlusRearContrib
-            rt = (pSrc[1] << 12) + centerPlusRearContrib;
-            // accumulate in destination
-            pDst[0] = clamp16(pDst[0] + (lt >> 13));
-            pDst[1] = clamp16(pDst[1] + (rt >> 13));
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusRearContrib = FC(-3dB) + RC(-3dB)
-            centerPlusRearContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
-            // FL + centerPlusRearContrib
-            lt = (pSrc[0] << 12) + centerPlusRearContrib;
-            // FR + centerPlusRearContrib
-            rt = (pSrc[1] << 12) + centerPlusRearContrib;
-            // store in destination
-            pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
-            pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-
-/*----------------------------------------------------------------------------
  * Downmix_foldFrom5Point1()
  *----------------------------------------------------------------------------
  * Purpose:
diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h
index fcb3c9e..2399abd 100644
--- a/media/libeffects/downmix/EffectDownmix.h
+++ b/media/libeffects/downmix/EffectDownmix.h
@@ -97,7 +97,6 @@
 int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
 
 void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFromSurround(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
 void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
 void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
 bool Downmix_foldGeneric(
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index dd2d306..c92c543 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -17,7 +17,6 @@
 LOCAL_MODULE:= libvisualizer
 
 LOCAL_C_INCLUDES := \
-	$(call include-path-for, graphics corecg) \
 	$(call include-path-for, audio-effects)
 
 
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 56e7787..f3770e4 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -25,6 +25,8 @@
     AudioRecord.cpp \
     AudioSystem.cpp \
     mediaplayer.cpp \
+    IMediaHTTPConnection.cpp \
+    IMediaHTTPService.cpp \
     IMediaLogService.cpp \
     IMediaPlayerService.cpp \
     IMediaPlayerClient.cpp \
@@ -44,7 +46,7 @@
     IAudioPolicyService.cpp \
     MediaScanner.cpp \
     MediaScannerClient.cpp \
-    autodetect.cpp \
+    CharacterEncodingDetector.cpp \
     IMediaDeathNotifier.cpp \
     MediaProfiles.cpp \
     IEffect.cpp \
@@ -58,26 +60,34 @@
 
 LOCAL_SRC_FILES += ../libnbaio/roundup.c
 
-# for <cutils/atomic-inline.h>
-LOCAL_CFLAGS += -DANDROID_SMP=$(if $(findstring true,$(TARGET_CPU_SMP)),1,0)
-LOCAL_SRC_FILES += SingleStateQueue.cpp
-LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"'
-# Consider a separate a library for SingleStateQueueInstantiations.
-
 LOCAL_SHARED_LIBRARIES := \
-	libui liblog libcutils libutils libbinder libsonivox libicuuc libexpat \
+	libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
         libcamera_client libstagefright_foundation \
-        libgui libdl libaudioutils
+        libgui libdl libaudioutils libnbaio
+
+LOCAL_STATIC_LIBRARIES += libinstantssq
 
 LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
 
 LOCAL_MODULE:= libmedia
 
 LOCAL_C_INCLUDES := \
-    $(call include-path-for, graphics corecg) \
     $(TOP)/frameworks/native/include/media/openmax \
     external/icu4c/common \
+    external/icu4c/i18n \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
 include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+# for <cutils/atomic-inline.h>
+LOCAL_CFLAGS += -DANDROID_SMP=$(if $(findstring true,$(TARGET_CPU_SMP)),1,0)
+LOCAL_SRC_FILES += SingleStateQueue.cpp
+LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"'
+
+LOCAL_MODULE := libinstantssq
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 8dfffb3..35f6557 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -380,9 +380,9 @@
 }
 
 void AudioEffect::commandExecuted(uint32_t cmdCode,
-                                  uint32_t cmdSize,
+                                  uint32_t cmdSize __unused,
                                   void *cmdData,
-                                  uint32_t replySize,
+                                  uint32_t replySize __unused,
                                   void *replyData)
 {
     if (cmdData == NULL || replyData == NULL) {
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index ccbc5a3..a7bf380 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -41,37 +41,29 @@
         return BAD_VALUE;
     }
 
-    // default to 0 in case of error
-    *frameCount = 0;
-
-    size_t size = 0;
+    size_t size;
     status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
     if (status != NO_ERROR) {
-        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
-        return NO_INIT;
+        ALOGE("AudioSystem could not query the input buffer size for sampleRate %u, format %#x, "
+              "channelMask %#x; status %d", sampleRate, format, channelMask, status);
+        return status;
     }
 
-    if (size == 0) {
-        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
+    // We double the size of input buffer for ping pong use of record buffer.
+    // Assumes audio_is_linear_pcm(format)
+    if ((*frameCount = (size * 2) / (popcount(channelMask) * audio_bytes_per_sample(format))) == 0) {
+        ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
             sampleRate, format, channelMask);
         return BAD_VALUE;
     }
 
-    // We double the size of input buffer for ping pong use of record buffer.
-    size <<= 1;
-
-    // Assumes audio_is_linear_pcm(format)
-    uint32_t channelCount = popcount(channelMask);
-    size /= channelCount * audio_bytes_per_sample(format);
-
-    *frameCount = size;
     return NO_ERROR;
 }
 
 // ---------------------------------------------------------------------------
 
 AudioRecord::AudioRecord()
-    : mStatus(NO_INIT), mSessionId(0),
+    : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
 {
 }
@@ -81,14 +73,14 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        int frameCount,
+        size_t frameCount,
         callback_t cbf,
         void* user,
-        int notificationFrames,
+        uint32_t notificationFrames,
         int sessionId,
         transfer_type transferType,
-        audio_input_flags_t flags)
-    : mStatus(NO_INIT), mSessionId(0),
+        audio_input_flags_t flags __unused)
+    : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mProxy(NULL)
@@ -110,12 +102,10 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
-        if (mAudioRecord != 0) {
-            mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
-            mAudioRecord.clear();
-        }
+        mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
+        mAudioRecord.clear();
         IPCThreadState::self()->flushCommands();
-        AudioSystem::releaseAudioSessionId(mSessionId);
+        AudioSystem::releaseAudioSessionId(mSessionId, -1);
     }
 }
 
@@ -124,15 +114,20 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        int frameCountInt,
+        size_t frameCount,
         callback_t cbf,
         void* user,
-        int notificationFrames,
+        uint32_t notificationFrames,
         bool threadCanCallJava,
         int sessionId,
         transfer_type transferType,
         audio_input_flags_t flags)
 {
+    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
+          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
+          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
+          sessionId, transferType, flags);
+
     switch (transferType) {
     case TRANSFER_DEFAULT:
         if (cbf == NULL || threadCanCallJava) {
@@ -156,23 +151,15 @@
     }
     mTransfer = transferType;
 
-    // FIXME "int" here is legacy and will be replaced by size_t later
-    if (frameCountInt < 0) {
-        ALOGE("Invalid frame count %d", frameCountInt);
-        return BAD_VALUE;
-    }
-    size_t frameCount = frameCountInt;
-
-    ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
-            frameCount);
-
     AutoMutex lock(mLock);
 
+    // invariant that mAudioRecord != 0 is true only after set() returns successfully
     if (mAudioRecord != 0) {
         ALOGE("Track already in use");
         return INVALID_OPERATION;
     }
 
+    // handle default values first.
     if (inputSource == AUDIO_SOURCE_DEFAULT) {
         inputSource = AUDIO_SOURCE_MIC;
     }
@@ -191,12 +178,12 @@
 
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format %d", format);
+        ALOGE("Invalid format %#x", format);
         return BAD_VALUE;
     }
     // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
     if (format != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGE("Format %d is not supported", format);
+        ALOGE("Format %#x is not supported", format);
         return BAD_VALUE;
     }
     mFormat = format;
@@ -209,15 +196,19 @@
     uint32_t channelCount = popcount(channelMask);
     mChannelCount = channelCount;
 
-    // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
-    mFrameSize = channelCount * audio_bytes_per_sample(format);
+    if (audio_is_linear_pcm(format)) {
+        mFrameSize = channelCount * audio_bytes_per_sample(format);
+    } else {
+        mFrameSize = sizeof(uint8_t);
+    }
 
     // validate framecount
-    size_t minFrameCount = 0;
+    size_t minFrameCount;
     status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
             sampleRate, format, channelMask);
     if (status != NO_ERROR) {
-        ALOGE("getMinFrameCount() failed; status %d", status);
+        ALOGE("getMinFrameCount() failed for sampleRate %u, format %#x, channelMask %#x; status %d",
+                sampleRate, format, channelMask, status);
         return status;
     }
     ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);
@@ -228,12 +219,13 @@
         ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
         return BAD_VALUE;
     }
-    mFrameCount = frameCount;
+    // mFrameCount is initialized in openRecord_l
+    mReqFrameCount = frameCount;
 
     mNotificationFramesReq = notificationFrames;
     mNotificationFramesAct = 0;
 
-    if (sessionId == 0 ) {
+    if (sessionId == AUDIO_SESSION_ALLOCATE) {
         mSessionId = AudioSystem::newAudioSessionId();
     } else {
         mSessionId = sessionId;
@@ -241,26 +233,27 @@
     ALOGV("set(): mSessionId %d", mSessionId);
 
     mFlags = flags;
-
-    // create the IAudioRecord
-    status = openRecord_l(0 /*epoch*/);
-    if (status) {
-        return status;
-    }
+    mCbf = cbf;
 
     if (cbf != NULL) {
         mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
     }
 
+    // create the IAudioRecord
+    status = openRecord_l(0 /*epoch*/);
+
+    if (status != NO_ERROR) {
+        if (mAudioRecordThread != 0) {
+            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
+            mAudioRecordThread->requestExitAndWait();
+            mAudioRecordThread.clear();
+        }
+        return status;
+    }
+
     mStatus = NO_ERROR;
-
-    // Update buffer size in case it has been limited by AudioFlinger during track creation
-    mFrameCount = mCblk->frameCount_;
-
     mActive = false;
-    mCbf = cbf;
-    mRefreshRemaining = true;
     mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000*mFrameCount) / sampleRate;
@@ -268,7 +261,7 @@
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    AudioSystem::acquireAudioSessionId(mSessionId);
+    AudioSystem::acquireAudioSessionId(mSessionId, -1);
     mSequence = 1;
     mObservedSequence = mSequence;
     mInOverrun = false;
@@ -289,6 +282,9 @@
 
     // reset current position as seen by client to 0
     mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+    // force refresh of remaining frames by processAudioBuffer() as last
+    // read before stop could be partial.
+    mRefreshRemaining = true;
 
     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
     int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
@@ -352,6 +348,7 @@
 
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
+    // The only purpose of setting marker position is to get a callback
     if (mCbf == NULL) {
         return INVALID_OPERATION;
     }
@@ -377,6 +374,7 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
+    // The only purpose of setting position update period is to get a callback
     if (mCbf == NULL) {
         return INVALID_OPERATION;
     }
@@ -412,7 +410,7 @@
     return NO_ERROR;
 }
 
-unsigned int AudioRecord::getInputFramesLost() const
+uint32_t AudioRecord::getInputFramesLost() const
 {
     // no need to check mActive, because if inactive this will return 0, which is what we want
     return AudioSystem::getInputFramesLost(getInput());
@@ -430,55 +428,82 @@
         return NO_INIT;
     }
 
-    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
-    pid_t tid = -1;
+    // Fast tracks must be at the primary _output_ [sic] sampling rate,
+    // because there is currently no concept of a primary input sampling rate
+    uint32_t afSampleRate = AudioSystem::getPrimaryOutputSamplingRate();
+    if (afSampleRate == 0) {
+        ALOGW("getPrimaryOutputSamplingRate failed");
+    }
 
     // Client can only express a preference for FAST.  Server will perform additional tests.
-    // The only supported use case for FAST is callback transfer mode.
+    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !(
+            // use case: callback transfer mode
+            (mTransfer == TRANSFER_CALLBACK) &&
+            // matching sample rate
+            (mSampleRate == afSampleRate))) {
+        ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
+        // once denied, do not request again if IAudioRecord is re-created
+        mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
+    }
+
+    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
+
+    pid_t tid = -1;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
-        if ((mTransfer != TRANSFER_CALLBACK) || (mAudioRecordThread == 0)) {
-            ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
-            // once denied, do not request again if IAudioRecord is re-created
-            mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
-        } else {
-            trackFlags |= IAudioFlinger::TRACK_FAST;
+        trackFlags |= IAudioFlinger::TRACK_FAST;
+        if (mAudioRecordThread != 0) {
             tid = mAudioRecordThread->getTid();
         }
     }
 
+    // FIXME Assume double buffering, because we don't know the true HAL sample rate
+    const uint32_t nBuffering = 2;
+
     mNotificationFramesAct = mNotificationFramesReq;
+    size_t frameCount = mReqFrameCount;
 
     if (!(mFlags & AUDIO_INPUT_FLAG_FAST)) {
         // Make sure that application is notified with sufficient margin before overrun
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount/2) {
-            mNotificationFramesAct = mFrameCount/2;
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
+            mNotificationFramesAct = frameCount/2;
         }
     }
 
     audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
             mChannelMask, mSessionId);
-    if (input == 0) {
-        ALOGE("Could not get audio input for record source %d", mInputSource);
+    if (input == AUDIO_IO_HANDLE_NONE) {
+        ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
+              "channel mask %#x, session %d",
+              mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId);
         return BAD_VALUE;
     }
+    {
+    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
+    // we must release it ourselves if anything goes wrong.
 
+    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
+                                // but we will still need the original value also
     int originalSessionId = mSessionId;
     sp<IAudioRecord> record = audioFlinger->openRecord(input,
                                                        mSampleRate, mFormat,
                                                        mChannelMask,
-                                                       mFrameCount,
+                                                       &temp,
                                                        &trackFlags,
                                                        tid,
                                                        &mSessionId,
                                                        &status);
-    ALOGE_IF(originalSessionId != 0 && mSessionId != originalSessionId,
+    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
             "session ID changed from %d to %d", originalSessionId, mSessionId);
 
-    if (record == 0 || status != NO_ERROR) {
+    if (status != NO_ERROR) {
         ALOGE("AudioFlinger could not create record track, status: %d", status);
-        AudioSystem::releaseInput(input);
-        return status;
+        goto release;
     }
+    ALOG_ASSERT(record != 0);
+
+    // AudioFlinger now owns the reference to the I/O handle,
+    // so we are no longer responsible for releasing it.
+
     sp<IMemory> iMem = record->getCblk();
     if (iMem == 0) {
         ALOGE("Could not get control block");
@@ -489,38 +514,56 @@
         ALOGE("Could not get control block pointer");
         return NO_INIT;
     }
+    // invariant that mAudioRecord != 0 is true only after set() returns successfully
     if (mAudioRecord != 0) {
         mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
         mDeathNotifier.clear();
     }
-    mInput = input;
     mAudioRecord = record;
+
     mCblkMemory = iMem;
     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
     mCblk = cblk;
-    // FIXME missing fast track frameCount logic
+    // note that temp is the (possibly revised) value of frameCount
+    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
+        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
+    }
+    frameCount = temp;
+
     mAwaitBoost = false;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
-            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", mFrameCount);
+            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", frameCount);
             mAwaitBoost = true;
-            // double-buffering is not required for fast tracks, due to tighter scheduling
-            if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount) {
-                mNotificationFramesAct = mFrameCount;
-            }
         } else {
-            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", mFrameCount);
+            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
             // once denied, do not request again if IAudioRecord is re-created
             mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
-            if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount/2) {
-                mNotificationFramesAct = mFrameCount/2;
-            }
+        }
+        // Theoretically double-buffering is not required for fast tracks,
+        // due to tighter scheduling.  But in practice, to accommodate kernels with
+        // scheduling jitter, and apps with computation jitter, we use double-buffering.
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
+            mNotificationFramesAct = frameCount/nBuffering;
         }
     }
 
-    // starting address of buffers in shared memory
+    // We retain a copy of the I/O handle, but don't own the reference
+    mInput = input;
+    mRefreshRemaining = true;
+
+    // Starting address of buffers in shared memory, immediately after the control block.  This
+    // address is for the mapping within client address space.  AudioFlinger::TrackBase::mBuffer
+    // is for the server address space.
     void *buffers = (char*)cblk + sizeof(audio_track_cblk_t);
 
+    mFrameCount = frameCount;
+    // If IAudioRecord is re-created, don't let the requested frameCount
+    // decrease.  This can confuse clients that cache frameCount().
+    if (frameCount > mReqFrameCount) {
+        mReqFrameCount = frameCount;
+    }
+
     // update proxy
     mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
     mProxy->setEpoch(epoch);
@@ -530,6 +573,14 @@
     mAudioRecord->asBinder()->linkToDeath(mDeathNotifier, this);
 
     return NO_ERROR;
+    }
+
+release:
+    AudioSystem::releaseInput(input);
+    if (status == NO_ERROR) {
+        status = NO_INIT;
+    }
+    return status;
 }
 
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
@@ -591,6 +642,9 @@
                 if (newSequence == oldSequence) {
                     status = restoreRecord_l("obtainBuffer");
                     if (status != NO_ERROR) {
+                        buffer.mFrameCount = 0;
+                        buffer.mRaw = NULL;
+                        buffer.mNonContig = 0;
                         break;
                     }
                 }
@@ -692,7 +746,7 @@
 
 // -------------------------------------------------------------------------
 
-nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
+nsecs_t AudioRecord::processAudioBuffer()
 {
     mLock.lock();
     if (mAwaitBoost) {
@@ -760,17 +814,17 @@
     }
 
     // Cache other fields that will be needed soon
-    size_t notificationFrames = mNotificationFramesAct;
+    uint32_t notificationFrames = mNotificationFramesAct;
     if (mRefreshRemaining) {
         mRefreshRemaining = false;
         mRemainingFrames = notificationFrames;
         mRetryOnPartialBuffer = false;
     }
     size_t misalignment = mProxy->getMisalignment();
-    int32_t sequence = mSequence;
+    uint32_t sequence = mSequence;
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //      mTransfer, mCbf, mUserData, mSampleRate
+    //      mTransfer, mCbf, mUserData, mSampleRate, mFrameSize
 
     mLock.unlock();
 
@@ -844,8 +898,8 @@
                 "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
-        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
-                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
                 break;
@@ -954,7 +1008,7 @@
 
 // =========================================================================
 
-void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who)
+void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
 {
     sp<AudioRecord> audioRecord = mAudioRecord.promote();
     if (audioRecord != 0) {
@@ -966,7 +1020,8 @@
 // =========================================================================
 
 AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
+      mIgnoreNextPausedInt(false)
 {
 }
 
@@ -983,6 +1038,10 @@
             // caller will check for exitPending()
             return true;
         }
+        if (mIgnoreNextPausedInt) {
+            mIgnoreNextPausedInt = false;
+            mPausedInt = false;
+        }
         if (mPausedInt) {
             if (mPausedNs > 0) {
                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
@@ -993,7 +1052,7 @@
             return true;
         }
     }
-    nsecs_t ns =  mReceiver.processAudioBuffer(this);
+    nsecs_t ns =  mReceiver.processAudioBuffer();
     switch (ns) {
     case 0:
         return true;
@@ -1017,12 +1076,7 @@
 {
     // must be in this order to avoid a race condition
     Thread::requestExit();
-    AutoMutex _l(mMyLock);
-    if (mPaused || mPausedInt) {
-        mPaused = false;
-        mPausedInt = false;
-        mMyCond.signal();
-    }
+    resume();
 }
 
 void AudioRecord::AudioRecordThread::pause()
@@ -1034,6 +1088,7 @@
 void AudioRecord::AudioRecordThread::resume()
 {
     AutoMutex _l(mMyLock);
+    mIgnoreNextPausedInt = true;
     if (mPaused || mPausedInt) {
         mPaused = false;
         mPausedInt = false;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index cc5b810..2f16444 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -35,15 +35,15 @@
 sp<IAudioFlinger> AudioSystem::gAudioFlinger;
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
-// Cached values
 
-DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
+// Cached values for output handles
+DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(NULL);
 
 // Cached values for recording queries, all protected by gLock
-uint32_t AudioSystem::gPrevInSamplingRate = 16000;
-audio_format_t AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT;
-audio_channel_mask_t AudioSystem::gPrevInChannelMask = AUDIO_CHANNEL_IN_MONO;
-size_t AudioSystem::gInBuffSize = 0;
+uint32_t AudioSystem::gPrevInSamplingRate;
+audio_format_t AudioSystem::gPrevInFormat;
+audio_channel_mask_t AudioSystem::gPrevInChannelMask;
+size_t AudioSystem::gInBuffSize = 0;    // zero indicates cache is invalid
 
 
 // establish binder interface to AudioFlinger service
@@ -84,13 +84,15 @@
     return DEAD_OBJECT;
 }
 
-status_t AudioSystem::muteMicrophone(bool state) {
+status_t AudioSystem::muteMicrophone(bool state)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setMicMute(state);
 }
 
-status_t AudioSystem::isMicrophoneMuted(bool* state) {
+status_t AudioSystem::isMicrophoneMuted(bool* state)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     *state = af->getMicMute();
@@ -175,13 +177,15 @@
     return af->setMode(mode);
 }
 
-status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) {
+status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setParameters(ioHandle, keyValuePairs);
 }
 
-String8 AudioSystem::getParameters(audio_io_handle_t ioHandle, const String8& keys) {
+String8 AudioSystem::getParameters(audio_io_handle_t ioHandle, const String8& keys)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     String8 result = String8("");
     if (af == 0) return result;
@@ -190,6 +194,16 @@
     return result;
 }
 
+status_t AudioSystem::setParameters(const String8& keyValuePairs)
+{
+    return setParameters(AUDIO_IO_HANDLE_NONE, keyValuePairs);
+}
+
+String8 AudioSystem::getParameters(const String8& keys)
+{
+    return getParameters(AUDIO_IO_HANDLE_NONE, keys);
+}
+
 // convert volume steps to natural log scale
 
 // change this value to change volume scaling
@@ -249,6 +263,11 @@
         *samplingRate = outputDesc->samplingRate;
         gLock.unlock();
     }
+    if (*samplingRate == 0) {
+        ALOGE("AudioSystem::getSamplingRate failed for output %d stream type %d",
+                output, streamType);
+        return BAD_VALUE;
+    }
 
     ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %u", streamType, output,
             *samplingRate);
@@ -265,7 +284,7 @@
     }
 
     output = getOutput(streamType);
-    if (output == 0) {
+    if (output == AUDIO_IO_HANDLE_NONE) {
         return PERMISSION_DENIED;
     }
 
@@ -289,6 +308,11 @@
         *frameCount = outputDesc->frameCount;
         gLock.unlock();
     }
+    if (*frameCount == 0) {
+        ALOGE("AudioSystem::getFrameCount failed for output %d stream type %d",
+                output, streamType);
+        return BAD_VALUE;
+    }
 
     ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output,
             *frameCount);
@@ -305,15 +329,14 @@
     }
 
     output = getOutput(streamType);
-    if (output == 0) {
+    if (output == AUDIO_IO_HANDLE_NONE) {
         return PERMISSION_DENIED;
     }
 
-    return getLatency(output, streamType, latency);
+    return getLatency(output, latency);
 }
 
 status_t AudioSystem::getLatency(audio_io_handle_t output,
-                                 audio_stream_type_t streamType,
                                  uint32_t* latency)
 {
     OutputDescriptor *outputDesc;
@@ -330,7 +353,7 @@
         gLock.unlock();
     }
 
-    ALOGV("getLatency() streamType %d, output %d, latency %d", streamType, output, *latency);
+    ALOGV("getLatency() output %d, latency %d", output, *latency);
 
     return NO_ERROR;
 }
@@ -349,6 +372,12 @@
             return PERMISSION_DENIED;
         }
         inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
+        if (inBuffSize == 0) {
+            ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
+                    sampleRate, format, channelMask);
+            return BAD_VALUE;
+        }
+        // A benign race is possible here: we could overwrite a fresher cache entry
         gLock.lock();
         // save the request params
         gPrevInSamplingRate = sampleRate;
@@ -371,55 +400,52 @@
 }
 
 status_t AudioSystem::getRenderPosition(audio_io_handle_t output, uint32_t *halFrames,
-                                        uint32_t *dspFrames, audio_stream_type_t stream)
+                                        uint32_t *dspFrames)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
 
-    if (stream == AUDIO_STREAM_DEFAULT) {
-        stream = AUDIO_STREAM_MUSIC;
-    }
-
-    if (output == 0) {
-        output = getOutput(stream);
-    }
-
     return af->getRenderPosition(halFrames, dspFrames, output);
 }
 
-size_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
+uint32_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    unsigned int result = 0;
+    uint32_t result = 0;
     if (af == 0) return result;
-    if (ioHandle == 0) return result;
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) return result;
 
     result = af->getInputFramesLost(ioHandle);
     return result;
 }
 
-int AudioSystem::newAudioSessionId() {
+int AudioSystem::newAudioSessionId()
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    if (af == 0) return 0;
+    if (af == 0) return AUDIO_SESSION_ALLOCATE;
     return af->newAudioSessionId();
 }
 
-void AudioSystem::acquireAudioSessionId(int audioSession) {
+void AudioSystem::acquireAudioSessionId(int audioSession, pid_t pid)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af != 0) {
-        af->acquireAudioSessionId(audioSession);
+        af->acquireAudioSessionId(audioSession, pid);
     }
 }
 
-void AudioSystem::releaseAudioSessionId(int audioSession) {
+void AudioSystem::releaseAudioSessionId(int audioSession, pid_t pid)
+{
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af != 0) {
-        af->releaseAudioSessionId(audioSession);
+        af->releaseAudioSessionId(audioSession, pid);
     }
 }
 
 // ---------------------------------------------------------------------------
 
-void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
+void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused)
+{
     Mutex::Autolock _l(AudioSystem::gLock);
 
     AudioSystem::gAudioFlinger.clear();
@@ -438,7 +464,7 @@
     const OutputDescriptor *desc;
     audio_stream_type_t stream;
 
-    if (ioHandle == 0) return;
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) return;
 
     Mutex::Autolock _l(AudioSystem::gLock);
 
@@ -455,7 +481,7 @@
 
         OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
         gOutputs.add(ioHandle, outputDesc);
-        ALOGV("ioConfigChanged() new output samplingRate %u, format %d channel mask %#x frameCount %u "
+        ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %u "
                 "latency %d",
                 outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
                 outputDesc->frameCount, outputDesc->latency);
@@ -479,7 +505,7 @@
         if (param2 == NULL) break;
         desc = (const OutputDescriptor *)param2;
 
-        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %d channel mask %#x "
+        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x channel mask %#x "
                 "frameCount %d latency %d",
                 ioHandle, desc->samplingRate, desc->format,
                 desc->channelMask, desc->frameCount, desc->latency);
@@ -496,12 +522,14 @@
     }
 }
 
-void AudioSystem::setErrorCallback(audio_error_callback cb) {
+void AudioSystem::setErrorCallback(audio_error_callback cb)
+{
     Mutex::Autolock _l(gLock);
     gAudioErrorCallback = cb;
 }
 
-bool AudioSystem::routedToA2dpOutput(audio_stream_type_t streamType) {
+bool AudioSystem::routedToA2dpOutput(audio_stream_type_t streamType)
+{
     switch (streamType) {
     case AUDIO_STREAM_MUSIC:
     case AUDIO_STREAM_VOICE_CALL:
@@ -702,14 +730,15 @@
 audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return (audio_devices_t)0;
+    if (aps == 0) return AUDIO_DEVICE_NONE;
     return aps->getDevicesForStream(stream);
 }
 
 audio_io_handle_t AudioSystem::getOutputForEffect(const effect_descriptor_t *desc)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return PERMISSION_DENIED;
+    // FIXME change return type to status_t, and return PERMISSION_DENIED here
+    if (aps == 0) return AUDIO_IO_HANDLE_NONE;
     return aps->getOutputForEffect(desc);
 }
 
@@ -804,7 +833,8 @@
 
 // ---------------------------------------------------------------------------
 
-void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
+void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
+{
     Mutex::Autolock _l(AudioSystem::gLock);
     AudioSystem::gAudioPolicyService.clear();
 
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 3f3a88c..8daf08b 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -44,9 +44,6 @@
         return BAD_VALUE;
     }
 
-    // default to 0 in case of error
-    *frameCount = 0;
-
     // FIXME merge with similar code in createTrack_l(), except we're missing
     //       some information here that is available in createTrack_l():
     //          audio_io_handle_t output
@@ -54,16 +51,26 @@
     //          audio_channel_mask_t channelMask
     //          audio_output_flags_t flags
     uint32_t afSampleRate;
-    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
-        return NO_INIT;
+    status_t status;
+    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
+    if (status != NO_ERROR) {
+        ALOGE("Unable to query output sample rate for stream type %d; status %d",
+                streamType, status);
+        return status;
     }
     size_t afFrameCount;
-    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
-        return NO_INIT;
+    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
+    if (status != NO_ERROR) {
+        ALOGE("Unable to query output frame count for stream type %d; status %d",
+                streamType, status);
+        return status;
     }
     uint32_t afLatency;
-    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
-        return NO_INIT;
+    status = AudioSystem::getOutputLatency(&afLatency, streamType);
+    if (status != NO_ERROR) {
+        ALOGE("Unable to query output latency for stream type %d; status %d",
+                streamType, status);
+        return status;
     }
 
     // Ensure that buffer depth covers at least audio hardware latency
@@ -74,6 +81,13 @@
 
     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
             afFrameCount * minBufCount * sampleRate / afSampleRate;
+    // The formula above should always produce a non-zero value, but return an error
+    // in the unlikely event that it does not, as that's part of the API contract.
+    if (*frameCount == 0) {
+        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
+                streamType, sampleRate);
+        return BAD_VALUE;
+    }
     ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
             *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
     return NO_ERROR;
@@ -95,15 +109,16 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        int frameCount,
+        size_t frameCount,
         audio_output_flags_t flags,
         callback_t cbf,
         void* user,
-        int notificationFrames,
+        uint32_t notificationFrames,
         int sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
-        int uid)
+        int uid,
+        pid_t pid)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -113,7 +128,7 @@
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
-            offloadInfo, uid);
+            offloadInfo, uid, pid);
 }
 
 AudioTrack::AudioTrack(
@@ -125,11 +140,12 @@
         audio_output_flags_t flags,
         callback_t cbf,
         void* user,
-        int notificationFrames,
+        uint32_t notificationFrames,
         int sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
-        int uid)
+        int uid,
+        pid_t pid)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -138,7 +154,8 @@
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
-            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
+            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+            uid, pid);
 }
 
 AudioTrack::~AudioTrack()
@@ -157,7 +174,9 @@
         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
         mAudioTrack.clear();
         IPCThreadState::self()->flushCommands();
-        AudioSystem::releaseAudioSessionId(mSessionId);
+        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
+                IPCThreadState::self()->getCallingPid(), mClientPid);
+        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
     }
 }
 
@@ -166,18 +185,24 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        int frameCountInt,
+        size_t frameCount,
         audio_output_flags_t flags,
         callback_t cbf,
         void* user,
-        int notificationFrames,
+        uint32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
         int sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
-        int uid)
+        int uid,
+        pid_t pid)
 {
+    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
+          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
+          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
+          sessionId, transferType);
+
     switch (transferType) {
     case TRANSFER_DEFAULT:
         if (sharedBuffer != 0) {
@@ -211,15 +236,9 @@
         ALOGE("Invalid transfer type %d", transferType);
         return BAD_VALUE;
     }
+    mSharedBuffer = sharedBuffer;
     mTransfer = transferType;
 
-    // FIXME "int" here is legacy and will be replaced by size_t later
-    if (frameCountInt < 0) {
-        ALOGE("Invalid frame count %d", frameCountInt);
-        return BAD_VALUE;
-    }
-    size_t frameCount = frameCountInt;
-
     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
             sharedBuffer->size());
 
@@ -233,19 +252,24 @@
         return INVALID_OPERATION;
     }
 
-    mOutput = 0;
-
     // handle default values first.
     if (streamType == AUDIO_STREAM_DEFAULT) {
         streamType = AUDIO_STREAM_MUSIC;
     }
+    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
+        ALOGE("Invalid stream type %d", streamType);
+        return BAD_VALUE;
+    }
+    mStreamType = streamType;
 
+    status_t status;
     if (sampleRate == 0) {
-        uint32_t afSampleRate;
-        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
-            return NO_INIT;
+        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
+        if (status != NO_ERROR) {
+            ALOGE("Could not get output sample rate for stream type %d; status %d",
+                    streamType, status);
+            return status;
         }
-        sampleRate = afSampleRate;
     }
     mSampleRate = sampleRate;
 
@@ -253,15 +277,21 @@
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
-    if (channelMask == 0) {
-        channelMask = AUDIO_CHANNEL_OUT_STEREO;
-    }
 
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format %d", format);
+        ALOGE("Invalid format %#x", format);
         return BAD_VALUE;
     }
+    mFormat = format;
+
+    if (!audio_is_output_channel(channelMask)) {
+        ALOGE("Invalid channel mask %#x", channelMask);
+        return BAD_VALUE;
+    }
+    mChannelMask = channelMask;
+    uint32_t channelCount = popcount(channelMask);
+    mChannelCount = channelCount;
 
     // AudioFlinger does not currently support 8-bit data in shared memory
     if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
@@ -285,14 +315,6 @@
         flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
     }
 
-    if (!audio_is_output_channel(channelMask)) {
-        ALOGE("Invalid channel mask %#x", channelMask);
-        return BAD_VALUE;
-    }
-    mChannelMask = channelMask;
-    uint32_t channelCount = popcount(channelMask);
-    mChannelCount = channelCount;
-
     if (audio_is_linear_pcm(format)) {
         mFrameSize = channelCount * audio_bytes_per_sample(format);
         mFrameSizeAF = channelCount * sizeof(int16_t);
@@ -301,30 +323,36 @@
         mFrameSizeAF = sizeof(uint8_t);
     }
 
-    audio_io_handle_t output = AudioSystem::getOutput(
-                                    streamType,
-                                    sampleRate, format, channelMask,
-                                    flags,
-                                    offloadInfo);
-
-    if (output == 0) {
-        ALOGE("Could not get audio output for stream type %d", streamType);
-        return BAD_VALUE;
+    // Make copy of input parameter offloadInfo so that in the future:
+    //  (a) createTrack_l doesn't need it as an input parameter
+    //  (b) we can support re-creation of offloaded tracks
+    if (offloadInfo != NULL) {
+        mOffloadInfoCopy = *offloadInfo;
+        mOffloadInfo = &mOffloadInfoCopy;
+    } else {
+        mOffloadInfo = NULL;
     }
 
-    mVolume[LEFT] = 1.0f;
-    mVolume[RIGHT] = 1.0f;
+    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
+    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
     mSendLevel = 0.0f;
-    mFrameCount = frameCount;
+    // mFrameCount is initialized in createTrack_l
     mReqFrameCount = frameCount;
     mNotificationFramesReq = notificationFrames;
     mNotificationFramesAct = 0;
     mSessionId = sessionId;
-    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
+    int callingpid = IPCThreadState::self()->getCallingPid();
+    int mypid = getpid();
+    if (uid == -1 || (callingpid != mypid)) {
         mClientUid = IPCThreadState::self()->getCallingUid();
     } else {
         mClientUid = uid;
     }
+    if (pid == -1 || (callingpid != mypid)) {
+        mClientPid = callingpid;
+    } else {
+        mClientPid = pid;
+    }
     mAuxEffectId = 0;
     mFlags = flags;
     mCbf = cbf;
@@ -335,14 +363,7 @@
     }
 
     // create the IAudioTrack
-    status_t status = createTrack_l(streamType,
-                                  sampleRate,
-                                  format,
-                                  frameCount,
-                                  flags,
-                                  sharedBuffer,
-                                  output,
-                                  0 /*epoch*/);
+    status = createTrack_l(0 /*epoch*/);
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
@@ -350,17 +371,10 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
-        //Use of direct and offloaded output streams is ref counted by audio policy manager.
-        // As getOutput was called above and resulted in an output stream to be opened,
-        // we need to release it.
-        AudioSystem::releaseOutput(output);
         return status;
     }
 
     mStatus = NO_ERROR;
-    mStreamType = streamType;
-    mFormat = format;
-    mSharedBuffer = sharedBuffer;
     mState = STATE_STOPPED;
     mUserData = user;
     mLoopPeriod = 0;
@@ -368,11 +382,10 @@
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    AudioSystem::acquireAudioSessionId(mSessionId);
+    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
     mSequence = 1;
     mObservedSequence = mSequence;
     mInUnderrun = false;
-    mOutput = output;
 
     return NO_ERROR;
 }
@@ -448,12 +461,11 @@
 void AudioTrack::stop()
 {
     AutoMutex lock(mLock);
-    // FIXME pause then stop should not be a nop
-    if (mState != STATE_ACTIVE) {
+    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
         return;
     }
 
-    if (isOffloaded()) {
+    if (isOffloaded_l()) {
         mState = STATE_STOPPING;
     } else {
         mState = STATE_STOPPED;
@@ -475,7 +487,7 @@
 
     sp<AudioTrackThread> t = mAudioTrackThread;
     if (t != 0) {
-        if (!isOffloaded()) {
+        if (!isOffloaded_l()) {
             t->pause();
         }
     } else {
@@ -513,7 +525,7 @@
     mRefreshRemaining = true;
 
     mState = STATE_FLUSHED;
-    if (isOffloaded()) {
+    if (isOffloaded_l()) {
         mProxy->interrupt();
     }
     mProxy->flush();
@@ -533,8 +545,8 @@
     mProxy->interrupt();
     mAudioTrack->pause();
 
-    if (isOffloaded()) {
-        if (mOutput != 0) {
+    if (isOffloaded_l()) {
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t halFrames;
             // OffloadThread sends HAL pause in its threadLoop.. time saved
             // here can be slightly off
@@ -551,12 +563,12 @@
     }
 
     AutoMutex lock(mLock);
-    mVolume[LEFT] = left;
-    mVolume[RIGHT] = right;
+    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
+    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
 
     mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
 
-    if (isOffloaded()) {
+    if (isOffloaded_l()) {
         mAudioTrack->signal();
     }
     return NO_ERROR;
@@ -620,8 +632,8 @@
     // sample rate can be updated during playback by the offloaded decoder so we need to
     // query the HAL and update if needed.
 // FIXME use Proxy return channel to update the rate from server and avoid polling here
-    if (isOffloaded()) {
-        if (mOutput != 0) {
+    if (isOffloaded_l()) {
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t sampleRate = 0;
             status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
             if (status == NO_ERROR) {
@@ -704,6 +716,7 @@
     AutoMutex lock(mLock);
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
+
     return NO_ERROR;
 }
 
@@ -757,7 +770,7 @@
     }
 
     AutoMutex lock(mLock);
-    if (isOffloaded()) {
+    if (isOffloaded_l()) {
         uint32_t dspFrames = 0;
 
         if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
@@ -766,7 +779,7 @@
             return NO_ERROR;
         }
 
-        if (mOutput != 0) {
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t halFrames;
             AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
         }
@@ -812,23 +825,12 @@
     return NO_ERROR;
 }
 
-audio_io_handle_t AudioTrack::getOutput()
+audio_io_handle_t AudioTrack::getOutput() const
 {
     AutoMutex lock(mLock);
     return mOutput;
 }
 
-// must be called with mLock held
-audio_io_handle_t AudioTrack::getOutput_l()
-{
-    if (mOutput) {
-        return mOutput;
-    } else {
-        return AudioSystem::getOutput(mStreamType,
-                                      mSampleRate, mFormat, mChannelMask, mFlags);
-    }
-}
-
 status_t AudioTrack::attachAuxEffect(int effectId)
 {
     AutoMutex lock(mLock);
@@ -842,15 +844,7 @@
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioTrack::createTrack_l(
-        audio_stream_type_t streamType,
-        uint32_t sampleRate,
-        audio_format_t format,
-        size_t frameCount,
-        audio_output_flags_t flags,
-        const sp<IMemory>& sharedBuffer,
-        audio_io_handle_t output,
-        size_t epoch)
+status_t AudioTrack::createTrack_l(size_t epoch)
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -859,50 +853,57 @@
         return NO_INIT;
     }
 
+    audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
+            mChannelMask, mFlags, mOffloadInfo);
+    if (output == AUDIO_IO_HANDLE_NONE) {
+        ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
+              "channel mask %#x, flags %#x",
+              mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
+        return BAD_VALUE;
+    }
+    {
+    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
+    // we must release it ourselves if anything goes wrong.
+
     // Not all of these values are needed under all conditions, but it is easier to get them all
 
     uint32_t afLatency;
-    status = AudioSystem::getLatency(output, streamType, &afLatency);
+    status = AudioSystem::getLatency(output, &afLatency);
     if (status != NO_ERROR) {
         ALOGE("getLatency(%d) failed status %d", output, status);
-        return NO_INIT;
+        goto release;
     }
 
     size_t afFrameCount;
-    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
+    status = AudioSystem::getFrameCount(output, mStreamType, &afFrameCount);
     if (status != NO_ERROR) {
-        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
-        return NO_INIT;
+        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, mStreamType, status);
+        goto release;
     }
 
     uint32_t afSampleRate;
-    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
+    status = AudioSystem::getSamplingRate(output, mStreamType, &afSampleRate);
     if (status != NO_ERROR) {
-        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
-        return NO_INIT;
+        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, mStreamType, status);
+        goto release;
     }
 
     // Client decides whether the track is TIMED (see below), but can only express a preference
     // for FAST.  Server will perform additional tests.
-    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
+    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
             // either of these use cases:
             // use case 1: shared buffer
-            (sharedBuffer != 0) ||
-            // use case 2: callback handler
-            (mCbf != NULL))) {
+            (mSharedBuffer != 0) ||
+            // use case 2: callback transfer mode
+            (mTransfer == TRANSFER_CALLBACK)) &&
+            // matching sample rate
+            (mSampleRate == afSampleRate))) {
         ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
         // once denied, do not request again if IAudioTrack is re-created
-        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
-        mFlags = flags;
+        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
     }
     ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
 
-    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && sampleRate != afSampleRate) {
-        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client due to mismatching sample rate (%d vs %d)",
-              sampleRate, afSampleRate);
-        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
-    }
-
     // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
     //  n = 1   fast track with single buffering; nBuffering is ignored
     //  n = 2   fast track with double buffering
@@ -910,43 +911,45 @@
     //  n = 3   normal track, with sample rate conversion
     //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
     //  n > 3   very high latency or very small notification interval; nBuffering is ignored
-    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
+    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
 
     mNotificationFramesAct = mNotificationFramesReq;
 
-    if (!audio_is_linear_pcm(format)) {
+    size_t frameCount = mReqFrameCount;
+    if (!audio_is_linear_pcm(mFormat)) {
 
-        if (sharedBuffer != 0) {
+        if (mSharedBuffer != 0) {
             // Same comment as below about ignoring frameCount parameter for set()
-            frameCount = sharedBuffer->size();
+            frameCount = mSharedBuffer->size();
         } else if (frameCount == 0) {
             frameCount = afFrameCount;
         }
         if (mNotificationFramesAct != frameCount) {
             mNotificationFramesAct = frameCount;
         }
-    } else if (sharedBuffer != 0) {
+    } else if (mSharedBuffer != 0) {
 
         // Ensure that buffer alignment matches channel count
         // 8-bit data in shared memory is not currently supported by AudioFlinger
-        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
+        size_t alignment = /* mFormat == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
         if (mChannelCount > 1) {
             // More than 2 channels does not require stronger alignment than stereo
             alignment <<= 1;
         }
-        if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
             ALOGE("Invalid buffer alignment: address %p, channel count %u",
-                    sharedBuffer->pointer(), mChannelCount);
-            return BAD_VALUE;
+                    mSharedBuffer->pointer(), mChannelCount);
+            status = BAD_VALUE;
+            goto release;
         }
 
         // When initializing a shared buffer AudioTrack via constructors,
         // there's no frameCount parameter.
         // But when initializing a shared buffer AudioTrack via set(),
         // there _is_ a frameCount parameter.  We silently ignore it.
-        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
+        frameCount = mSharedBuffer->size()/mChannelCount/sizeof(int16_t);
 
-    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
+    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
 
         // FIXME move these calculations and associated checks to server
 
@@ -958,10 +961,10 @@
             minBufCount = nBuffering;
         }
 
-        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
         ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                 ", afLatency=%d",
-                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
+                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
 
         if (frameCount == 0) {
             frameCount = minFrameCount;
@@ -986,52 +989,64 @@
     }
 
     pid_t tid = -1;
-    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
+    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         trackFlags |= IAudioFlinger::TRACK_FAST;
         if (mAudioTrackThread != 0) {
             tid = mAudioTrackThread->getTid();
         }
     }
 
-    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
         trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
     }
 
-    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
-                                                      sampleRate,
+    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
+                                // but we will still need the original value also
+    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
+                                                      mSampleRate,
                                                       // AudioFlinger only sees 16-bit PCM
-                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
-                                                              AUDIO_FORMAT_PCM_16_BIT : format,
+                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT ?
+                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
                                                       mChannelMask,
-                                                      frameCount,
+                                                      &temp,
                                                       &trackFlags,
-                                                      sharedBuffer,
+                                                      mSharedBuffer,
                                                       output,
                                                       tid,
                                                       &mSessionId,
-                                                      mName,
                                                       mClientUid,
                                                       &status);
 
-    if (track == 0) {
+    if (status != NO_ERROR) {
         ALOGE("AudioFlinger could not create track, status: %d", status);
-        return status;
+        goto release;
     }
+    ALOG_ASSERT(track != 0);
+
+    // AudioFlinger now owns the reference to the I/O handle,
+    // so we are no longer responsible for releasing it.
+
     sp<IMemory> iMem = track->getCblk();
     if (iMem == 0) {
         ALOGE("Could not get control block");
         return NO_INIT;
     }
+    void *iMemPointer = iMem->pointer();
+    if (iMemPointer == NULL) {
+        ALOGE("Could not get control block pointer");
+        return NO_INIT;
+    }
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
         mDeathNotifier.clear();
     }
     mAudioTrack = track;
+
     mCblkMemory = iMem;
-    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
+    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
     mCblk = cblk;
-    size_t temp = cblk->frameCount_;
+    // note that temp is the (possibly revised) value of frameCount
     if (temp < frameCount || (frameCount == 0 && temp == 0)) {
         // In current design, AudioTrack client checks and ensures frame count validity before
         // passing it to AudioFlinger so AudioFlinger should not return a different value except
@@ -1039,12 +1054,13 @@
         ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
     }
     frameCount = temp;
+
     mAwaitBoost = false;
-    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
+    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
             mAwaitBoost = true;
-            if (sharedBuffer == 0) {
+            if (mSharedBuffer == 0) {
                 // Theoretically double-buffering is not required for fast tracks,
                 // due to tighter scheduling.  But in practice, to accommodate kernels with
                 // scheduling jitter, and apps with computation jitter, we use double-buffering.
@@ -1055,26 +1071,27 @@
         } else {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
             // once denied, do not request again if IAudioTrack is re-created
-            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
-            mFlags = flags;
-            if (sharedBuffer == 0) {
+            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
+            if (mSharedBuffer == 0) {
                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                     mNotificationFramesAct = frameCount/nBuffering;
                 }
             }
         }
     }
-    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
         if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
             ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
         } else {
             ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
-            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
-            mFlags = flags;
-            return NO_INIT;
+            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+            // FIXME This is a warning, not an error, so don't return error status
+            //return NO_INIT;
         }
     }
 
+    // We retain a copy of the I/O handle, but don't own the reference
+    mOutput = output;
     mRefreshRemaining = true;
 
     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
@@ -1082,15 +1099,16 @@
     // immediately after the control block.  This address is for the mapping within client
     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
     void* buffers;
-    if (sharedBuffer == 0) {
+    if (mSharedBuffer == 0) {
         buffers = (char*)cblk + sizeof(audio_track_cblk_t);
     } else {
-        buffers = sharedBuffer->pointer();
+        buffers = mSharedBuffer->pointer();
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
     // FIXME don't believe this lie
-    mLatency = afLatency + (1000*frameCount) / sampleRate;
+    mLatency = afLatency + (1000*frameCount) / mSampleRate;
+
     mFrameCount = frameCount;
     // If IAudioTrack is re-created, don't let the requested frameCount
     // decrease.  This can confuse clients that cache frameCount().
@@ -1099,15 +1117,15 @@
     }
 
     // update proxy
-    if (sharedBuffer == 0) {
+    if (mSharedBuffer == 0) {
         mStaticProxy.clear();
         mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
     } else {
         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
         mProxy = mStaticProxy;
     }
-    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
-            uint16_t(mVolume[LEFT] * 0x1000));
+    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[AUDIO_INTERLEAVE_RIGHT] * 0x1000)) << 16) |
+            uint16_t(mVolume[AUDIO_INTERLEAVE_LEFT] * 0x1000));
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
     mProxy->setEpoch(epoch);
@@ -1117,6 +1135,14 @@
     mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
 
     return NO_ERROR;
+    }
+
+release:
+    AudioSystem::releaseOutput(output);
+    if (status == NO_ERROR) {
+        status = NO_INIT;
+    }
+    return status;
 }
 
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
@@ -1244,8 +1270,7 @@
     if (mState == STATE_ACTIVE) {
         audio_track_cblk_t* cblk = mCblk;
         if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
-            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
-                    this, mName.string());
+            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
             // FIXME ignoring status
             mAudioTrack->start();
         }
@@ -1254,7 +1279,7 @@
 
 // -------------------------------------------------------------------------
 
-ssize_t AudioTrack::write(const void* buffer, size_t userSize)
+ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
 {
     if (mTransfer != TRANSFER_SYNC || mIsTimed) {
         return INVALID_OPERATION;
@@ -1273,7 +1298,8 @@
     while (userSize >= mFrameSize) {
         audioBuffer.frameCount = userSize / mFrameSize;
 
-        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
+        status_t err = obtainBuffer(&audioBuffer,
+                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
         if (err < 0) {
             if (written > 0) {
                 break;
@@ -1369,7 +1395,7 @@
 
 // -------------------------------------------------------------------------
 
-nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
+nsecs_t AudioTrack::processAudioBuffer()
 {
     // Currently the AudioTrack thread is not created if there are no callbacks.
     // Would it ever make sense to run the thread, even without callbacks?
@@ -1407,7 +1433,7 @@
         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
         // AudioSystem cache. We should not exit here but after calling the callback so
         // that the upper layers can recreate the track
-        if (!isOffloaded() || (mSequence == mObservedSequence)) {
+        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
             status_t status = restoreTrack_l("processAudioBuffer");
             mLock.unlock();
             // Run again immediately, but with a new IAudioTrack
@@ -1462,7 +1488,7 @@
     // Cache other fields that will be needed soon
     uint32_t loopPeriod = mLoopPeriod;
     uint32_t sampleRate = mSampleRate;
-    size_t notificationFrames = mNotificationFramesAct;
+    uint32_t notificationFrames = mNotificationFramesAct;
     if (mRefreshRemaining) {
         mRefreshRemaining = false;
         mRemainingFrames = notificationFrames;
@@ -1626,7 +1652,6 @@
         size_t reqSize = audioBuffer.size;
         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
         size_t writtenSize = audioBuffer.size;
-        size_t writtenFrames = writtenSize / mFrameSize;
 
         // Sanity check on returned size
         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
@@ -1692,22 +1717,19 @@
 status_t AudioTrack::restoreTrack_l(const char *from)
 {
     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
-          isOffloaded() ? "Offloaded" : "PCM", from);
+          isOffloaded_l() ? "Offloaded" : "PCM", from);
     ++mSequence;
     status_t result;
 
     // refresh the audio configuration cache in this process to make sure we get new
-    // output parameters in getOutput_l() and createTrack_l()
+    // output parameters in createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
-    if (isOffloaded()) {
+    if (isOffloaded_l()) {
+        // FIXME re-creation of offloaded tracks is not yet implemented
         return DEAD_OBJECT;
     }
 
-    // force new output query from audio policy manager;
-    mOutput = 0;
-    audio_io_handle_t output = getOutput_l();
-
     // if the new IAudioTrack is created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory
@@ -1715,14 +1737,7 @@
     // take the frames that will be lost by track recreation into account in saved position
     size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
     size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
-    result = createTrack_l(mStreamType,
-                           mSampleRate,
-                           mFormat,
-                           mReqFrameCount,  // so that frame count never goes down
-                           mFlags,
-                           mSharedBuffer,
-                           output,
-                           position /*epoch*/);
+    result = createTrack_l(position /*epoch*/);
 
     if (result == NO_ERROR) {
         // continue playback from last known position, but
@@ -1750,10 +1765,6 @@
         }
     }
     if (result != NO_ERROR) {
-        //Use of direct and offloaded output streams is ref counted by audio policy manager.
-        // As getOutput was called above and resulted in an output stream to be opened,
-        // we need to release it.
-        AudioSystem::releaseOutput(output);
         ALOGW("restoreTrack_l() failed status %d", result);
         mState = STATE_STOPPED;
     }
@@ -1786,14 +1797,21 @@
 
 String8 AudioTrack::getParameters(const String8& keys)
 {
-    if (mOutput) {
-        return AudioSystem::getParameters(mOutput, keys);
+    audio_io_handle_t output = getOutput();
+    if (output != AUDIO_IO_HANDLE_NONE) {
+        return AudioSystem::getParameters(output, keys);
     } else {
         return String8::empty();
     }
 }
 
-status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
+bool AudioTrack::isOffloaded() const
+{
+    AutoMutex lock(mLock);
+    return isOffloaded_l();
+}
+
+status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
 {
 
     const size_t SIZE = 256;
@@ -1802,7 +1820,7 @@
 
     result.append(" AudioTrack::dump\n");
     snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
-            mVolume[0], mVolume[1]);
+            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
     result.append(buffer);
     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
             mChannelCount, mFrameCount);
@@ -1823,7 +1841,7 @@
 
 // =========================================================================
 
-void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
+void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
 {
     sp<AudioTrack> audioTrack = mAudioTrack.promote();
     if (audioTrack != 0) {
@@ -1867,7 +1885,7 @@
             return true;
         }
     }
-    nsecs_t ns = mReceiver.processAudioBuffer(this);
+    nsecs_t ns = mReceiver.processAudioBuffer();
     switch (ns) {
     case 0:
         return true;
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index e898109..58c9fc1 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -26,7 +26,7 @@
 namespace android {
 
 audio_track_cblk_t::audio_track_cblk_t()
-    : mServer(0), frameCount_(0), mFutex(0), mMinimum(0),
+    : mServer(0), mFutex(0), mMinimum(0),
     mVolumeLR(0x10001000), mSampleRate(0), mSendLevel(0), mFlags(0)
 {
     memset(&u, 0, sizeof(u));
@@ -200,7 +200,7 @@
             ts = &remaining;
             break;
         default:
-            LOG_FATAL("obtainBuffer() timeout=%d", timeout);
+            LOG_ALWAYS_FATAL("obtainBuffer() timeout=%d", timeout);
             ts = NULL;
             break;
         }
@@ -429,7 +429,7 @@
             ts = &remaining;
             break;
         default:
-            LOG_FATAL("waitStreamEndDone() timeout=%d", timeout);
+            LOG_ALWAYS_FATAL("waitStreamEndDone() timeout=%d", timeout);
             ts = NULL;
             break;
         }
@@ -470,7 +470,7 @@
 
 void StaticAudioTrackClientProxy::flush()
 {
-    LOG_FATAL("static flush");
+    LOG_ALWAYS_FATAL("static flush");
 }
 
 void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
@@ -771,7 +771,7 @@
     return (ssize_t) position;
 }
 
-status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
+status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush __unused)
 {
     if (mIsShutdown) {
         buffer->mFrameCount = 0;
@@ -854,7 +854,7 @@
     buffer->mNonContig = 0;
 }
 
-void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
+void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount __unused)
 {
     // Unlike AudioTrackServerProxy::tallyUnderrunFrames() used for streaming tracks,
     // we don't have a location to count underrun frames.  The underrun frame counter
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
new file mode 100644
index 0000000..4992798
--- /dev/null
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CharacterEncodingDector"
+#include <utils/Log.h>
+
+#include "CharacterEncodingDetector.h"
+#include "CharacterEncodingDetectorTables.h"
+
+#include "utils/Vector.h"
+#include "StringArray.h"
+
+#include "unicode/ucnv.h"
+#include "unicode/ucsdet.h"
+#include "unicode/ustring.h"
+
+namespace android {
+
+CharacterEncodingDetector::CharacterEncodingDetector() {
+
+    UErrorCode status = U_ZERO_ERROR;
+    mUtf8Conv = ucnv_open("UTF-8", &status);
+    if (U_FAILURE(status)) {
+        ALOGE("could not create UConverter for UTF-8");
+        mUtf8Conv = NULL;
+    }
+}
+
+CharacterEncodingDetector::~CharacterEncodingDetector() {
+    ucnv_close(mUtf8Conv);
+}
+
+void CharacterEncodingDetector::addTag(const char *name, const char *value) {
+    mNames.push_back(name);
+    mValues.push_back(value);
+}
+
+size_t CharacterEncodingDetector::size() {
+    return mNames.size();
+}
+
+status_t CharacterEncodingDetector::getTag(int index, const char **name, const char**value) {
+    if (index >= mNames.size()) {
+        return BAD_VALUE;
+    }
+
+    *name = mNames.getEntry(index);
+    *value = mValues.getEntry(index);
+    return OK;
+}
+
+static bool isPrintableAscii(const char *value, size_t len) {
+    for (size_t i = 0; i < len; i++) {
+        if ((value[i] & 0x80) || value[i] < 0x20 || value[i] == 0x7f) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void CharacterEncodingDetector::detectAndConvert() {
+
+    int size = mNames.size();
+    ALOGV("%d tags before conversion", size);
+    for (int i = 0; i < size; i++) {
+        ALOGV("%s: %s", mNames.getEntry(i), mValues.getEntry(i));
+    }
+
+    if (size && mUtf8Conv) {
+
+        UErrorCode status = U_ZERO_ERROR;
+        UCharsetDetector *csd = ucsdet_open(&status);
+        const UCharsetMatch *ucm;
+
+        // try combined detection of artist/album/title etc.
+        char buf[1024];
+        buf[0] = 0;
+        int idx;
+        bool allprintable = true;
+        for (int i = 0; i < size; i++) {
+            const char *name = mNames.getEntry(i);
+            const char *value = mValues.getEntry(i);
+            if (!isPrintableAscii(value, strlen(value)) && (
+                        !strcmp(name, "artist") ||
+                        !strcmp(name, "albumartist") ||
+                        !strcmp(name, "composer") ||
+                        !strcmp(name, "genre") ||
+                        !strcmp(name, "album") ||
+                        !strcmp(name, "title"))) {
+                strlcat(buf, value, sizeof(buf));
+                // separate tags by space so ICU's ngram detector can do its job
+                strlcat(buf, " ", sizeof(buf));
+                allprintable = false;
+            }
+        }
+
+        const char *combinedenc = "UTF-8";
+        if (allprintable) {
+            // since 'buf' is empty, ICU would return a UTF-8 matcher with low confidence, so
+            // no need to even call it
+            ALOGV("all tags are printable, assuming ascii (%d)", strlen(buf));
+        } else {
+            ucsdet_setText(csd, buf, strlen(buf), &status);
+            int32_t matches;
+            const UCharsetMatch** ucma = ucsdet_detectAll(csd, &matches, &status);
+            bool goodmatch = true;
+            const UCharsetMatch* bestCombinedMatch = getPreferred(buf, strlen(buf),
+                    ucma, matches, &goodmatch);
+
+            if (!goodmatch && strlen(buf) < 20) {
+                ALOGV("not a good match, trying with more data");
+                // This string might be too short for ICU to do anything useful with.
+                // (real world example: "Björk" in ISO-8859-1 might be detected as GB18030, because
+                //  the ISO detector reports a confidence of 0, while the GB18030 detector reports
+                //  a confidence of 10 with no invalid characters)
+                // Append artist, album and title if they were previously omitted because they
+                // were printable ascii.
+                bool added = false;
+                for (int i = 0; i < size; i++) {
+                    const char *name = mNames.getEntry(i);
+                    const char *value = mValues.getEntry(i);
+                    if (isPrintableAscii(value, strlen(value)) && (
+                                !strcmp(name, "artist") ||
+                                !strcmp(name, "album") ||
+                                !strcmp(name, "title"))) {
+                        strlcat(buf, value, sizeof(buf));
+                        strlcat(buf, " ", sizeof(buf));
+                        added = true;
+                    }
+                }
+                if (added) {
+                    ucsdet_setText(csd, buf, strlen(buf), &status);
+                    ucma = ucsdet_detectAll(csd, &matches, &status);
+                    bestCombinedMatch = getPreferred(buf, strlen(buf),
+                            ucma, matches, &goodmatch);
+                    if (!goodmatch) {
+                        ALOGV("still not a good match after adding printable tags");
+                    }
+                } else {
+                    ALOGV("no printable tags to add");
+                }
+            }
+
+            if (bestCombinedMatch != NULL) {
+                combinedenc = ucsdet_getName(bestCombinedMatch, &status);
+            }
+        }
+
+        for (int i = 0; i < size; i++) {
+            const char *name = mNames.getEntry(i);
+            uint8_t* src = (uint8_t *)mValues.getEntry(i);
+            int len = strlen((char *)src);
+            uint8_t* dest = src;
+
+            ALOGV("@@@ checking %s", name);
+            const char *s = mValues.getEntry(i);
+            int32_t inputLength = strlen(s);
+            const char *enc;
+
+            if (!allprintable && (!strcmp(name, "artist") ||
+                    !strcmp(name, "albumartist") ||
+                    !strcmp(name, "composer") ||
+                    !strcmp(name, "genre") ||
+                    !strcmp(name, "album") ||
+                    !strcmp(name, "title"))) {
+                // use encoding determined from the combination of artist/album/title etc.
+                enc = combinedenc;
+            } else {
+                if (isPrintableAscii(s, inputLength)) {
+                    enc = "UTF-8";
+                    ALOGV("@@@@ %s is ascii", mNames.getEntry(i));
+                } else {
+                    ucsdet_setText(csd, s, inputLength, &status);
+                    ucm = ucsdet_detect(csd, &status);
+                    if (!ucm) {
+                        mValues.setEntry(i, "???");
+                        continue;
+                    }
+                    enc = ucsdet_getName(ucm, &status);
+                    ALOGV("@@@@ recognized charset: %s for %s confidence %d",
+                            enc, mNames.getEntry(i), ucsdet_getConfidence(ucm, &status));
+                }
+            }
+
+            if (strcmp(enc,"UTF-8") != 0) {
+                // only convert if the source encoding isn't already UTF-8
+                ALOGV("@@@ using converter %s for %s", enc, mNames.getEntry(i));
+                UConverter *conv = ucnv_open(enc, &status);
+                if (U_FAILURE(status)) {
+                    ALOGE("could not create UConverter for %s", enc);
+                    continue;
+                }
+
+                // convert from native encoding to UTF-8
+                const char* source = mValues.getEntry(i);
+                int targetLength = len * 3 + 1;
+                char* buffer = new char[targetLength];
+                // NOTE(review): targetLength may be large, but operator new throws rather than returning NULL, so this check only matters for -fno-exceptions builds
+                if (!buffer)
+                    break;
+                char* target = buffer;
+
+                ucnv_convertEx(mUtf8Conv, conv, &target, target + targetLength,
+                        &source, source + strlen(source),
+                        NULL, NULL, NULL, NULL, TRUE, TRUE, &status);
+
+                if (U_FAILURE(status)) {
+                    ALOGE("ucnv_convertEx failed: %d", status);
+                    mValues.setEntry(i, "???");
+                } else {
+                    // zero terminate
+                    *target = 0;
+                    mValues.setEntry(i, buffer);
+                }
+
+                delete[] buffer;
+
+                ucnv_close(conv);
+            }
+        }
+
+        for (int i = size - 1; i >= 0; --i) {
+            if (strlen(mValues.getEntry(i)) == 0) {
+                ALOGV("erasing %s because entry is empty", mNames.getEntry(i));
+                mNames.erase(i);
+                mValues.erase(i);
+            }
+        }
+
+        ucsdet_close(csd);
+    }
+}
+
+/*
+ * When ICU detects multiple encoding matches, apply additional heuristics to determine
+ * which one is the best match, since ICU can't always be trusted to make the right choice.
+ *
+ * What this method does is:
+ * - decode the input using each of the matches found
+ * - recalculate the starting confidence level for multibyte encodings using a different
+ *   algorithm and larger frequent character lists than ICU
+ * - devalue encodings whose converted output contains unlikely characters (symbols, reserved, etc.)
+ * - pick the highest match
+ * - signal to the caller whether this match is considered good: confidence > 15, and confidence
+ *   delta with the next runner up > 15
+ */
+const UCharsetMatch *CharacterEncodingDetector::getPreferred(
+        const char *input, size_t len,
+        const UCharsetMatch** ucma, size_t nummatches,
+        bool *goodmatch) {
+
+    *goodmatch = false;
+    Vector<const UCharsetMatch*> matches;
+    UErrorCode status = U_ZERO_ERROR;
+
+    ALOGV("%d matches", nummatches);
+    for (size_t i = 0; i < nummatches; i++) {
+        const char *encname = ucsdet_getName(ucma[i], &status);
+        int confidence = ucsdet_getConfidence(ucma[i], &status);
+        ALOGV("%d: %s %d", i, encname, confidence);
+        matches.push_back(ucma[i]);
+    }
+
+    size_t num = matches.size();
+    if (num == 0) {
+        return NULL;
+    }
+    if (num == 1) {
+        int confidence = ucsdet_getConfidence(matches[0], &status);
+        if (confidence > 15) {
+            *goodmatch = true;
+        }
+        return matches[0];
+    }
+
+    ALOGV("considering %d matches", num);
+
+    // keep track of how many "special" characters result when converting the input using each
+    // encoding
+    Vector<int> newconfidence;
+    for (size_t i = 0; i < num; i++) {
+        const uint16_t *freqdata = NULL;
+        float freqcoverage = 0;
+        status = U_ZERO_ERROR;
+        const char *encname = ucsdet_getName(matches[i], &status);
+        int confidence = ucsdet_getConfidence(matches[i], &status);
+        if (!strcmp("GB18030", encname)) {
+            freqdata = frequent_zhCN;
+            freqcoverage = frequent_zhCN_coverage;
+        } else if (!strcmp("Big5", encname)) {
+            freqdata = frequent_zhTW;
+            freqcoverage = frequent_zhTW_coverage;
+        } else if (!strcmp("EUC-KR", encname)) {
+            freqdata = frequent_ko;
+            freqcoverage = frequent_ko_coverage;
+        } else if (!strcmp("EUC-JP", encname)) {
+            freqdata = frequent_ja;
+            freqcoverage = frequent_ja_coverage;
+        } else if (!strcmp("Shift_JIS", encname)) {
+            freqdata = frequent_ja;
+            freqcoverage = frequent_ja_coverage;
+        }
+
+        ALOGV("%d: %s %d", i, encname, confidence);
+        UConverter *conv = ucnv_open(encname, &status);
+        const char *source = input;
+        const char *sourceLimit = input + len;
+        status = U_ZERO_ERROR;
+        int demerit = 0;
+        int frequentchars = 0;
+        int totalchars = 0;
+        while (true) {
+            // demerit the current encoding for each "special" character found after conversion.
+            // The amount of demerit is somewhat arbitrarily chosen.
+            int inchar;
+            if (source != sourceLimit) {
+                inchar = (source[0] << 8) + source[1];
+            }
+            UChar32 c = ucnv_getNextUChar(conv, &source, sourceLimit, &status);
+            if (!U_SUCCESS(status)) {
+                break;
+            }
+            if (c < 0x20 || (c >= 0x7f && c <= 0x009f)) {
+                ALOGV("control character %x", c);
+                demerit += 100;
+            } else if ((c >= 0xa0 && c <= 0xbe)         // symbols, superscripts
+                    || (c == 0xd7) || (c == 0xf7)       // multiplication and division signs
+                    || (c >= 0x2000 && c <= 0x209f)) {  // punctuation, superscripts
+                ALOGV("unlikely character %x", c);
+                demerit += 10;
+            } else if (c >= 0xe000 && c <= 0xf8ff) {
+                ALOGV("private use character %x", c);
+                demerit += 30;
+            } else if (c >= 0x2190 && c <= 0x2bff) {
+                // this range comprises various symbol ranges that are unlikely to appear in
+                // music file metadata.
+                ALOGV("symbol %x", c);
+                demerit += 10;
+            } else if (c == 0xfffd) {
+                ALOGV("replacement character");
+                demerit += 50;
+            } else if (c >= 0xfff0 && c <= 0xfffc) {
+                ALOGV("unicode special %x", c);
+                demerit += 50;
+            } else if (freqdata != NULL) {
+                totalchars++;
+                if (isFrequent(freqdata, c)) {
+                    frequentchars++;
+                }
+            }
+        }
+        if (freqdata != NULL && totalchars != 0) {
+            int myconfidence = 10 + float((100 * frequentchars) / totalchars) / freqcoverage;
+            ALOGV("ICU confidence: %d, my confidence: %d (%d %d)", confidence, myconfidence,
+                    totalchars, frequentchars);
+            if (myconfidence > 100) myconfidence = 100;
+            if (myconfidence < 0) myconfidence = 0;
+            confidence = myconfidence;
+        }
+        ALOGV("%d-%d=%d", confidence, demerit, confidence - demerit);
+        newconfidence.push_back(confidence - demerit);
+        ucnv_close(conv);
+        if (i == 0 && (confidence - demerit) == 100) {
+            // no need to check any further, we'll end up using this match anyway
+            break;
+        }
+    }
+
+    // find match with highest confidence after adjusting for unlikely characters
+    int highest = newconfidence[0];
+    size_t highestidx = 0;
+    int runnerup = -10000;
+    int runnerupidx = -10000;
+    num = newconfidence.size();
+    for (size_t i = 1; i < num; i++) {
+        if (newconfidence[i] > highest) {
+            runnerup = highest;
+            runnerupidx = highestidx;
+            highest = newconfidence[i];
+            highestidx = i;
+        } else if (newconfidence[i] > runnerup){
+            runnerup = newconfidence[i];
+            runnerupidx = i;
+        }
+    }
+    status = U_ZERO_ERROR;
+    ALOGV("selecting: '%s' w/ %d confidence",
+            ucsdet_getName(matches[highestidx], &status), highest);
+    if (runnerupidx < 0) {
+        ALOGV("no runner up");
+        if (highest > 15) {
+            *goodmatch = true;
+        }
+    } else {
+        ALOGV("runner up: '%s' w/ %d confidence",
+                ucsdet_getName(matches[runnerupidx], &status), runnerup);
+        if ((highest - runnerup) > 15) {
+            *goodmatch = true;
+        }
+    }
+    return matches[highestidx];
+}
+
+
+bool CharacterEncodingDetector::isFrequent(const uint16_t *values, uint32_t c) {
+
+    int start = 0;
+    int end = 511; // All the tables have 512 entries
+    int mid = (start+end)/2;
+
+    while(start <= end) {
+        if(c == values[mid]) {
+            return true;
+        } else if (c > values[mid]) {
+            start = mid + 1;
+        } else {
+            end = mid - 1;
+        }
+
+        mid = (start + end) / 2;
+    }
+
+    return false;
+}
+
+
+}  // namespace android
diff --git a/media/libmedia/CharacterEncodingDetector.h b/media/libmedia/CharacterEncodingDetector.h
new file mode 100644
index 0000000..7b5ed86
--- /dev/null
+++ b/media/libmedia/CharacterEncodingDetector.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _CHARACTER_ENCODING_DETECTOR_H
+#define _CHARACTER_ENCODING_DETECTOR_H
+
+#include <media/mediascanner.h>
+
+#include "StringArray.h"
+
+#include "unicode/ucnv.h"
+#include "unicode/ucsdet.h"
+#include "unicode/ustring.h"
+
+namespace android {
+
+class CharacterEncodingDetector {
+
+    public:
+    CharacterEncodingDetector();
+        ~CharacterEncodingDetector();
+
+        void addTag(const char *name, const char *value);
+        size_t size();
+
+        void detectAndConvert();
+        status_t getTag(int index, const char **name, const char**value);
+
+    private:
+        const UCharsetMatch *getPreferred(
+                const char *input, size_t len,
+                const UCharsetMatch** ucma, size_t matches,
+                bool *goodmatch);
+
+        bool isFrequent(const uint16_t *values, uint32_t c);
+
+        // cached name and value strings, for native encoding support.
+        // TODO: replace these with byte blob arrays that don't require the data to be
+        // terminated by a single null byte
+        StringArray     mNames;
+        StringArray     mValues;
+
+        UConverter*     mUtf8Conv;
+};
+
+
+
+};  // namespace android
+
+#endif
diff --git a/media/libmedia/CharacterEncodingDetectorTables.h b/media/libmedia/CharacterEncodingDetectorTables.h
new file mode 100644
index 0000000..1fe1137
--- /dev/null
+++ b/media/libmedia/CharacterEncodingDetectorTables.h
@@ -0,0 +1,2092 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The 512 most frequently occurring characters for the zhCN language in a sample of the Internet.
+// Ordered by codepoint, comment shows character and ranking by frequency
+const uint16_t frequent_zhCN[] = {
+    0x4E00, // 一, #2
+    0x4E07, // 万, #306
+    0x4E09, // 三, #138
+    0x4E0A, // 上, #16
+    0x4E0B, // 下, #25
+    0x4E0D, // 不, #7
+    0x4E0E, // 与, #133
+    0x4E13, // 专, #151
+    0x4E16, // 世, #346
+    0x4E1A, // 业, #39
+    0x4E1C, // 东, #197
+    0x4E24, // 两, #376
+    0x4E2A, // 个, #23
+    0x4E2D, // 中, #4
+    0x4E3A, // 为, #31
+    0x4E3B, // 主, #95
+    0x4E3E, // 举, #418
+    0x4E48, // 么, #93
+    0x4E4B, // 之, #131
+    0x4E50, // 乐, #130
+    0x4E5F, // 也, #145
+    0x4E66, // 书, #283
+    0x4E70, // 买, #483
+    0x4E86, // 了, #13
+    0x4E8B, // 事, #168
+    0x4E8C, // 二, #218
+    0x4E8E, // 于, #64
+    0x4E94, // 五, #430
+    0x4E9A, // 亚, #468
+    0x4E9B, // 些, #366
+    0x4EA4, // 交, #243
+    0x4EA7, // 产, #86
+    0x4EAB, // 享, #345
+    0x4EAC, // 京, #206
+    0x4EBA, // 人, #3
+    0x4EC0, // 什, #287
+    0x4ECB, // 介, #478
+    0x4ECE, // 从, #381
+    0x4ED6, // 他, #129
+    0x4EE3, // 代, #241
+    0x4EE5, // 以, #51
+    0x4EEC, // 们, #83
+    0x4EF6, // 件, #141
+    0x4EF7, // 价, #140
+    0x4EFB, // 任, #383
+    0x4F01, // 企, #439
+    0x4F18, // 优, #374
+    0x4F1A, // 会, #29
+    0x4F20, // 传, #222
+    0x4F46, // 但, #451
+    0x4F4D, // 位, #208
+    0x4F53, // 体, #98
+    0x4F55, // 何, #339
+    0x4F5C, // 作, #44
+    0x4F60, // 你, #76
+    0x4F7F, // 使, #272
+    0x4F9B, // 供, #375
+    0x4FDD, // 保, #180
+    0x4FE1, // 信, #84
+    0x4FEE, // 修, #437
+    0x503C, // 值, #450
+    0x505A, // 做, #368
+    0x5065, // 健, #484
+    0x50CF, // 像, #487
+    0x513F, // 儿, #326
+    0x5143, // 元, #202
+    0x5148, // 先, #485
+    0x5149, // 光, #254
+    0x514B, // 克, #503
+    0x514D, // 免, #349
+    0x5165, // 入, #156
+    0x5168, // 全, #47
+    0x516C, // 公, #35
+    0x5171, // 共, #448
+    0x5173, // 关, #49
+    0x5176, // 其, #195
+    0x5177, // 具, #329
+    0x5185, // 内, #109
+    0x518C, // 册, #225
+    0x519B, // 军, #466
+    0x51FA, // 出, #53
+    0x51FB, // 击, #359
+    0x5206, // 分, #22
+    0x5217, // 列, #410
+    0x521B, // 创, #399
+    0x5229, // 利, #296
+    0x522B, // 别, #372
+    0x5230, // 到, #33
+    0x5236, // 制, #192
+    0x524D, // 前, #117
+    0x529B, // 力, #173
+    0x529E, // 办, #436
+    0x529F, // 功, #455
+    0x52A0, // 加, #97
+    0x52A1, // 务, #100
+    0x52A8, // 动, #46
+    0x52A9, // 助, #365
+    0x5305, // 包, #331
+    0x5316, // 化, #155
+    0x5317, // 北, #194
+    0x533A, // 区, #105
+    0x533B, // 医, #234
+    0x5341, // 十, #294
+    0x534E, // 华, #205
+    0x5355, // 单, #259
+    0x5357, // 南, #182
+    0x535A, // 博, #153
+    0x5361, // 卡, #332
+    0x539F, // 原, #271
+    0x53BB, // 去, #282
+    0x53C2, // 参, #500
+    0x53CA, // 及, #255
+    0x53CB, // 友, #186
+    0x53CD, // 反, #422
+    0x53D1, // 发, #15
+    0x53D7, // 受, #507
+    0x53D8, // 变, #395
+    0x53E3, // 口, #293
+    0x53EA, // 只, #340
+    0x53EF, // 可, #45
+    0x53F0, // 台, #267
+    0x53F7, // 号, #121
+    0x53F8, // 司, #150
+    0x5404, // 各, #491
+    0x5408, // 合, #115
+    0x540C, // 同, #189
+    0x540D, // 名, #127
+    0x540E, // 后, #75
+    0x5411, // 向, #459
+    0x5427, // 吧, #353
+    0x544A, // 告, #318
+    0x5458, // 员, #232
+    0x5468, // 周, #347
+    0x548C, // 和, #43
+    0x54C1, // 品, #36
+    0x5546, // 商, #148
+    0x5668, // 器, #228
+    0x56DB, // 四, #352
+    0x56DE, // 回, #38
+    0x56E0, // 因, #355
+    0x56E2, // 团, #412
+    0x56ED, // 园, #470
+    0x56FD, // 国, #12
+    0x56FE, // 图, #32
+    0x5728, // 在, #10
+    0x5730, // 地, #30
+    0x573A, // 场, #177
+    0x575B, // 坛, #364
+    0x578B, // 型, #274
+    0x57CE, // 城, #172
+    0x57FA, // 基, #315
+    0x58EB, // 士, #434
+    0x58F0, // 声, #397
+    0x5904, // 处, #416
+    0x5907, // 备, #270
+    0x590D, // 复, #122
+    0x5916, // 外, #190
+    0x591A, // 多, #40
+    0x5927, // 大, #8
+    0x5929, // 天, #52
+    0x592A, // 太, #456
+    0x5934, // 头, #258
+    0x5973, // 女, #65
+    0x597D, // 好, #62
+    0x5982, // 如, #135
+    0x5A31, // 娱, #452
+    0x5B50, // 子, #37
+    0x5B57, // 字, #285
+    0x5B66, // 学, #19
+    0x5B89, // 安, #144
+    0x5B8C, // 完, #469
+    0x5B9A, // 定, #179
+    0x5B9D, // 宝, #188
+    0x5B9E, // 实, #154
+    0x5BA2, // 客, #174
+    0x5BB6, // 家, #26
+    0x5BB9, // 容, #307
+    0x5BC6, // 密, #471
+    0x5BF9, // 对, #90
+    0x5BFC, // 导, #348
+    0x5C06, // 将, #265
+    0x5C0F, // 小, #28
+    0x5C11, // 少, #379
+    0x5C14, // 尔, #490
+    0x5C31, // 就, #101
+    0x5C55, // 展, #291
+    0x5C71, // 山, #239
+    0x5DDE, // 州, #227
+    0x5DE5, // 工, #73
+    0x5DF1, // 己, #480
+    0x5DF2, // 已, #310
+    0x5E02, // 市, #78
+    0x5E03, // 布, #350
+    0x5E08, // 师, #277
+    0x5E16, // 帖, #396
+    0x5E26, // 带, #449
+    0x5E2E, // 帮, #461
+    0x5E38, // 常, #319
+    0x5E73, // 平, #217
+    0x5E74, // 年, #20
+    0x5E76, // 并, #440
+    0x5E7F, // 广, #166
+    0x5E93, // 库, #446
+    0x5E94, // 应, #187
+    0x5E97, // 店, #320
+    0x5EA6, // 度, #114
+    0x5EB7, // 康, #499
+    0x5EFA, // 建, #211
+    0x5F00, // 开, #72
+    0x5F0F, // 式, #207
+    0x5F15, // 引, #495
+    0x5F20, // 张, #385
+    0x5F3A, // 强, #404
+    0x5F53, // 当, #233
+    0x5F55, // 录, #146
+    0x5F62, // 形, #494
+    0x5F69, // 彩, #356
+    0x5F71, // 影, #214
+    0x5F88, // 很, #300
+    0x5F97, // 得, #193
+    0x5FAE, // 微, #245
+    0x5FC3, // 心, #70
+    0x5FEB, // 快, #324
+    0x6001, // 态, #508
+    0x600E, // 怎, #370
+    0x6027, // 性, #99
+    0x603B, // 总, #398
+    0x606F, // 息, #176
+    0x60A8, // 您, #251
+    0x60C5, // 情, #87
+    0x60F3, // 想, #290
+    0x610F, // 意, #184
+    0x611F, // 感, #253
+    0x620F, // 戏, #237
+    0x6210, // 成, #71
+    0x6211, // 我, #11
+    0x6216, // 或, #321
+    0x6218, // 战, #369
+    0x6237, // 户, #215
+    0x623F, // 房, #236
+    0x6240, // 所, #147
+    0x624B, // 手, #55
+    0x624D, // 才, #407
+    0x6253, // 打, #281
+    0x6280, // 技, #203
+    0x6295, // 投, #408
+    0x62A4, // 护, #502
+    0x62A5, // 报, #113
+    0x62DB, // 招, #363
+    0x6301, // 持, #403
+    0x6307, // 指, #414
+    0x636E, // 据, #409
+    0x6392, // 排, #377
+    0x63A5, // 接, #266
+    0x63A8, // 推, #244
+    0x63D0, // 提, #181
+    0x641C, // 搜, #301
+    0x64AD, // 播, #401
+    0x652F, // 支, #400
+    0x6536, // 收, #158
+    0x653E, // 放, #317
+    0x653F, // 政, #380
+    0x6548, // 效, #496
+    0x6559, // 教, #170
+    0x6570, // 数, #136
+    0x6587, // 文, #21
+    0x6599, // 料, #295
+    0x65AF, // 斯, #473
+    0x65B0, // 新, #14
+    0x65B9, // 方, #68
+    0x65C5, // 旅, #457
+    0x65E0, // 无, #164
+    0x65E5, // 日, #50
+    0x65F6, // 时, #18
+    0x660E, // 明, #132
+    0x6613, // 易, #428
+    0x661F, // 星, #240
+    0x662F, // 是, #6
+    0x663E, // 显, #486
+    0x66F4, // 更, #103
+    0x6700, // 最, #61
+    0x6708, // 月, #80
+    0x6709, // 有, #5
+    0x670D, // 服, #94
+    0x671F, // 期, #139
+    0x672C, // 本, #56
+    0x672F, // 术, #216
+    0x673A, // 机, #27
+    0x6743, // 权, #250
+    0x6761, // 条, #309
+    0x6765, // 来, #42
+    0x677F, // 板, #505
+    0x6797, // 林, #475
+    0x679C, // 果, #212
+    0x67E5, // 查, #165
+    0x6807, // 标, #269
+    0x6821, // 校, #462
+    0x6837, // 样, #314
+    0x683C, // 格, #238
+    0x6848, // 案, #378
+    0x697C, // 楼, #342
+    0x6A21, // 模, #413
+    0x6B21, // 次, #263
+    0x6B22, // 欢, #443
+    0x6B3E, // 款, #358
+    0x6B63, // 正, #219
+    0x6B64, // 此, #362
+    0x6BD4, // 比, #298
+    0x6C11, // 民, #279
+    0x6C14, // 气, #303
+    0x6C34, // 水, #163
+    0x6C42, // 求, #373
+    0x6C5F, // 江, #336
+    0x6CA1, // 没, #229
+    0x6CBB, // 治, #425
+    0x6CD5, // 法, #85
+    0x6CE8, // 注, #119
+    0x6D3B, // 活, #231
+    0x6D41, // 流, #280
+    0x6D4B, // 测, #460
+    0x6D77, // 海, #124
+    0x6D88, // 消, #415
+    0x6DF1, // 深, #477
+    0x6E05, // 清, #311
+    0x6E38, // 游, #81
+    0x6E90, // 源, #325
+    0x706B, // 火, #498
+    0x70B9, // 点, #58
+    0x70ED, // 热, #183
+    0x7136, // 然, #308
+    0x7167, // 照, #431
+    0x7231, // 爱, #223
+    0x7247, // 片, #128
+    0x7248, // 版, #91
+    0x724C, // 牌, #429
+    0x7269, // 物, #169
+    0x7279, // 特, #224
+    0x738B, // 王, #351
+    0x73A9, // 玩, #476
+    0x73B0, // 现, #125
+    0x7403, // 球, #367
+    0x7406, // 理, #69
+    0x751F, // 生, #24
+    0x7528, // 用, #17
+    0x7531, // 由, #441
+    0x7535, // 电, #34
+    0x7537, // 男, #275
+    0x754C, // 界, #419
+    0x75C5, // 病, #371
+    0x767B, // 登, #204
+    0x767D, // 白, #338
+    0x767E, // 百, #157
+    0x7684, // 的, #1
+    0x76D8, // 盘, #493
+    0x76EE, // 目, #261
+    0x76F4, // 直, #391
+    0x76F8, // 相, #143
+    0x7701, // 省, #464
+    0x770B, // 看, #54
+    0x771F, // 真, #249
+    0x7740, // 着, #302
+    0x77E5, // 知, #142
+    0x7801, // 码, #257
+    0x7814, // 研, #387
+    0x793A, // 示, #334
+    0x793E, // 社, #343
+    0x795E, // 神, #330
+    0x798F, // 福, #509
+    0x79BB, // 离, #454
+    0x79CD, // 种, #278
+    0x79D1, // 科, #126
+    0x79EF, // 积, #390
+    0x7A0B, // 程, #209
+    0x7A76, // 究, #504
+    0x7A7A, // 空, #312
+    0x7ACB, // 立, #393
+    0x7AD9, // 站, #107
+    0x7AE0, // 章, #304
+    0x7B2C, // 第, #96
+    0x7B49, // 等, #210
+    0x7B54, // 答, #256
+    0x7B80, // 简, #474
+    0x7BA1, // 管, #221
+    0x7C7B, // 类, #246
+    0x7CBE, // 精, #226
+    0x7CFB, // 系, #89
+    0x7D22, // 索, #354
+    0x7EA2, // 红, #417
+    0x7EA7, // 级, #178
+    0x7EBF, // 线, #108
+    0x7EC4, // 组, #389
+    0x7EC6, // 细, #442
+    0x7ECF, // 经, #74
+    0x7ED3, // 结, #333
+    0x7ED9, // 给, #384
+    0x7EDC, // 络, #472
+    0x7EDF, // 统, #344
+    0x7F16, // 编, #424
+    0x7F51, // 网, #9
+    0x7F6E, // 置, #411
+    0x7F8E, // 美, #60
+    0x8001, // 老, #292
+    0x8003, // 考, #288
+    0x8005, // 者, #106
+    0x800C, // 而, #297
+    0x8054, // 联, #159
+    0x80B2, // 育, #327
+    0x80FD, // 能, #59
+    0x81EA, // 自, #77
+    0x8272, // 色, #198
+    0x8282, // 节, #361
+    0x82B1, // 花, #299
+    0x82F1, // 英, #316
+    0x8350, // 荐, #402
+    0x836F, // 药, #481
+    0x8425, // 营, #394
+    0x85CF, // 藏, #337
+    0x884C, // 行, #41
+    0x8868, // 表, #104
+    0x88AB, // 被, #289
+    0x88C5, // 装, #161
+    0x897F, // 西, #199
+    0x8981, // 要, #48
+    0x89C1, // 见, #360
+    0x89C2, // 观, #423
+    0x89C4, // 规, #453
+    0x89C6, // 视, #120
+    0x89E3, // 解, #264
+    0x8A00, // 言, #433
+    0x8BA1, // 计, #191
+    0x8BA4, // 认, #482
+    0x8BA9, // 让, #421
+    0x8BAE, // 议, #427
+    0x8BAF, // 讯, #388
+    0x8BB0, // 记, #273
+    0x8BBA, // 论, #66
+    0x8BBE, // 设, #162
+    0x8BC1, // 证, #201
+    0x8BC4, // 评, #111
+    0x8BC6, // 识, #463
+    0x8BD5, // 试, #323
+    0x8BDD, // 话, #247
+    0x8BE2, // 询, #432
+    0x8BE5, // 该, #447
+    0x8BE6, // 详, #497
+    0x8BED, // 语, #268
+    0x8BF4, // 说, #112
+    0x8BF7, // 请, #213
+    0x8BFB, // 读, #341
+    0x8C03, // 调, #438
+    0x8D22, // 财, #488
+    0x8D28, // 质, #386
+    0x8D2D, // 购, #260
+    0x8D34, // 贴, #510
+    0x8D39, // 费, #242
+    0x8D44, // 资, #116
+    0x8D77, // 起, #220
+    0x8D85, // 超, #406
+    0x8DEF, // 路, #235
+    0x8EAB, // 身, #262
+    0x8F66, // 车, #82
+    0x8F6C, // 转, #322
+    0x8F7D, // 载, #175
+    0x8FBE, // 达, #435
+    0x8FC7, // 过, #118
+    0x8FD0, // 运, #357
+    0x8FD1, // 近, #492
+    0x8FD8, // 还, #171
+    0x8FD9, // 这, #57
+    0x8FDB, // 进, #160
+    0x8FDE, // 连, #489
+    0x9009, // 选, #328
+    0x901A, // 通, #137
+    0x901F, // 速, #458
+    0x9020, // 造, #511
+    0x9053, // 道, #79
+    0x90A3, // 那, #305
+    0x90E8, // 部, #102
+    0x90FD, // 都, #167
+    0x914D, // 配, #479
+    0x9152, // 酒, #444
+    0x91CC, // 里, #196
+    0x91CD, // 重, #230
+    0x91CF, // 量, #248
+    0x91D1, // 金, #134
+    0x9500, // 销, #465
+    0x957F, // 长, #152
+    0x95E8, // 门, #185
+    0x95EE, // 问, #92
+    0x95F4, // 间, #88
+    0x95FB, // 闻, #313
+    0x9605, // 阅, #467
+    0x9633, // 阳, #420
+    0x9645, // 际, #501
+    0x9650, // 限, #286
+    0x9662, // 院, #276
+    0x96C6, // 集, #284
+    0x9700, // 需, #405
+    0x9762, // 面, #123
+    0x97F3, // 音, #335
+    0x9875, // 页, #63
+    0x9879, // 项, #506
+    0x9891, // 频, #200
+    0x9898, // 题, #110
+    0x98CE, // 风, #252
+    0x98DF, // 食, #445
+    0x9996, // 首, #149
+    0x9999, // 香, #512
+    0x9A6C, // 马, #392
+    0x9A8C, // 验, #382
+    0x9AD8, // 高, #67
+    0x9F99, // 龙, #426
+};
+// the percentage of the sample covered by the above characters
+static const float frequent_zhCN_coverage=0.718950369339973;
+
+// The 512 most frequently occurring characters for the zhTW language in a sample of the Internet.
+// Ordered by codepoint, comment shows character and ranking by frequency
+const uint16_t frequent_zhTW[] = {
+    0x4E00, // 一, #2
+    0x4E09, // 三, #131
+    0x4E0A, // 上, #12
+    0x4E0B, // 下, #37
+    0x4E0D, // 不, #6
+    0x4E16, // 世, #312
+    0x4E26, // 並, #434
+    0x4E2D, // 中, #9
+    0x4E3B, // 主, #97
+    0x4E4B, // 之, #55
+    0x4E5F, // 也, #95
+    0x4E86, // 了, #19
+    0x4E8B, // 事, #128
+    0x4E8C, // 二, #187
+    0x4E94, // 五, #339
+    0x4E9B, // 些, #435
+    0x4E9E, // 亞, #432
+    0x4EA4, // 交, #264
+    0x4EAB, // 享, #160
+    0x4EBA, // 人, #3
+    0x4EC0, // 什, #483
+    0x4ECA, // 今, #380
+    0x4ECB, // 介, #468
+    0x4ED6, // 他, #65
+    0x4EE3, // 代, #284
+    0x4EE5, // 以, #26
+    0x4EF6, // 件, #234
+    0x4EFB, // 任, #381
+    0x4EFD, // 份, #447
+    0x4F46, // 但, #281
+    0x4F4D, // 位, #202
+    0x4F4F, // 住, #471
+    0x4F55, // 何, #334
+    0x4F5C, // 作, #56
+    0x4F60, // 你, #64
+    0x4F7F, // 使, #236
+    0x4F86, // 來, #38
+    0x4F9B, // 供, #397
+    0x4FBF, // 便, #440
+    0x4FC2, // 係, #506
+    0x4FDD, // 保, #161
+    0x4FE1, // 信, #268
+    0x4FEE, // 修, #473
+    0x500B, // 個, #27
+    0x5011, // 們, #109
+    0x505A, // 做, #383
+    0x5065, // 健, #415
+    0x5099, // 備, #461
+    0x50B3, // 傳, #277
+    0x50CF, // 像, #403
+    0x50F9, // 價, #93
+    0x512A, // 優, #396
+    0x5143, // 元, #158
+    0x5148, // 先, #382
+    0x5149, // 光, #216
+    0x514D, // 免, #321
+    0x5152, // 兒, #374
+    0x5165, // 入, #58
+    0x5167, // 內, #106
+    0x5168, // 全, #67
+    0x5169, // 兩, #322
+    0x516C, // 公, #53
+    0x516D, // 六, #493
+    0x5171, // 共, #456
+    0x5176, // 其, #148
+    0x5177, // 具, #328
+    0x518A, // 冊, #360
+    0x518D, // 再, #311
+    0x51FA, // 出, #44
+    0x5206, // 分, #15
+    0x5217, // 列, #259
+    0x5225, // 別, #361
+    0x5229, // 利, #251
+    0x5230, // 到, #29
+    0x5247, // 則, #511
+    0x524D, // 前, #82
+    0x5275, // 創, #409
+    0x529B, // 力, #176
+    0x529F, // 功, #430
+    0x52A0, // 加, #87
+    0x52A9, // 助, #465
+    0x52D5, // 動, #48
+    0x52D9, // 務, #102
+    0x5305, // 包, #248
+    0x5316, // 化, #223
+    0x5317, // 北, #145
+    0x5340, // 區, #60
+    0x5341, // 十, #242
+    0x5357, // 南, #261
+    0x535A, // 博, #484
+    0x5361, // 卡, #327
+    0x5370, // 印, #498
+    0x5373, // 即, #351
+    0x539F, // 原, #237
+    0x53BB, // 去, #190
+    0x53C3, // 參, #444
+    0x53C8, // 又, #426
+    0x53CA, // 及, #136
+    0x53CB, // 友, #142
+    0x53D6, // 取, #422
+    0x53D7, // 受, #410
+    0x53E3, // 口, #357
+    0x53EA, // 只, #250
+    0x53EF, // 可, #35
+    0x53F0, // 台, #34
+    0x53F8, // 司, #226
+    0x5403, // 吃, #362
+    0x5404, // 各, #454
+    0x5408, // 合, #147
+    0x540C, // 同, #173
+    0x540D, // 名, #108
+    0x544A, // 告, #186
+    0x548C, // 和, #130
+    0x54C1, // 品, #23
+    0x54E1, // 員, #150
+    0x5546, // 商, #75
+    0x554F, // 問, #120
+    0x559C, // 喜, #502
+    0x55AE, // 單, #210
+    0x55CE, // 嗎, #443
+    0x5668, // 器, #305
+    0x56DB, // 四, #318
+    0x56DE, // 回, #59
+    0x56E0, // 因, #253
+    0x570B, // 國, #21
+    0x5712, // 園, #345
+    0x5716, // 圖, #73
+    0x5718, // 團, #338
+    0x5728, // 在, #11
+    0x5730, // 地, #50
+    0x578B, // 型, #270
+    0x57CE, // 城, #466
+    0x57FA, // 基, #349
+    0x5831, // 報, #127
+    0x5834, // 場, #165
+    0x58EB, // 士, #372
+    0x5916, // 外, #152
+    0x591A, // 多, #54
+    0x5927, // 大, #8
+    0x5929, // 天, #43
+    0x592A, // 太, #343
+    0x5947, // 奇, #325
+    0x5973, // 女, #85
+    0x5979, // 她, #420
+    0x597D, // 好, #22
+    0x5982, // 如, #144
+    0x5B50, // 子, #46
+    0x5B57, // 字, #275
+    0x5B78, // 學, #49
+    0x5B89, // 安, #239
+    0x5B8C, // 完, #320
+    0x5B9A, // 定, #159
+    0x5BA2, // 客, #188
+    0x5BB6, // 家, #31
+    0x5BB9, // 容, #244
+    0x5BE6, // 實, #198
+    0x5BF6, // 寶, #367
+    0x5C07, // 將, #232
+    0x5C08, // 專, #133
+    0x5C0B, // 尋, #352
+    0x5C0D, // 對, #126
+    0x5C0E, // 導, #418
+    0x5C0F, // 小, #20
+    0x5C11, // 少, #368
+    0x5C31, // 就, #63
+    0x5C55, // 展, #341
+    0x5C71, // 山, #273
+    0x5DE5, // 工, #121
+    0x5DF1, // 己, #402
+    0x5DF2, // 已, #299
+    0x5E02, // 市, #81
+    0x5E2B, // 師, #262
+    0x5E36, // 帶, #470
+    0x5E38, // 常, #303
+    0x5E73, // 平, #297
+    0x5E74, // 年, #30
+    0x5E97, // 店, #171
+    0x5EA6, // 度, #220
+    0x5EB7, // 康, #441
+    0x5EE3, // 廣, #279
+    0x5EFA, // 建, #254
+    0x5F0F, // 式, #155
+    0x5F15, // 引, #346
+    0x5F35, // 張, #366
+    0x5F37, // 強, #437
+    0x5F71, // 影, #94
+    0x5F88, // 很, #177
+    0x5F8C, // 後, #66
+    0x5F97, // 得, #113
+    0x5F9E, // 從, #436
+    0x5FC3, // 心, #57
+    0x5FEB, // 快, #292
+    0x6027, // 性, #175
+    0x606F, // 息, #378
+    0x60A8, // 您, #252
+    0x60C5, // 情, #123
+    0x60F3, // 想, #178
+    0x610F, // 意, #168
+    0x611B, // 愛, #125
+    0x611F, // 感, #211
+    0x61C9, // 應, #164
+    0x6210, // 成, #86
+    0x6211, // 我, #7
+    0x6216, // 或, #199
+    0x6230, // 戰, #438
+    0x6232, // 戲, #309
+    0x6236, // 戶, #497
+    0x623F, // 房, #274
+    0x6240, // 所, #79
+    0x624B, // 手, #68
+    0x624D, // 才, #400
+    0x6253, // 打, #278
+    0x627E, // 找, #449
+    0x6280, // 技, #332
+    0x6295, // 投, #425
+    0x62C9, // 拉, #500
+    0x62CD, // 拍, #398
+    0x6307, // 指, #407
+    0x6392, // 排, #458
+    0x63A5, // 接, #326
+    0x63A8, // 推, #153
+    0x63D0, // 提, #235
+    0x641C, // 搜, #314
+    0x6469, // 摩, #472
+    0x6536, // 收, #249
+    0x6539, // 改, #508
+    0x653E, // 放, #331
+    0x653F, // 政, #295
+    0x6559, // 教, #184
+    0x6574, // 整, #394
+    0x6578, // 數, #134
+    0x6587, // 文, #16
+    0x6599, // 料, #167
+    0x65AF, // 斯, #476
+    0x65B0, // 新, #10
+    0x65B9, // 方, #96
+    0x65BC, // 於, #70
+    0x65C5, // 旅, #289
+    0x65E5, // 日, #18
+    0x660E, // 明, #118
+    0x6613, // 易, #482
+    0x661F, // 星, #205
+    0x662F, // 是, #5
+    0x6642, // 時, #13
+    0x66F4, // 更, #149
+    0x66F8, // 書, #209
+    0x6700, // 最, #51
+    0x6703, // 會, #14
+    0x6708, // 月, #25
+    0x6709, // 有, #4
+    0x670D, // 服, #99
+    0x671F, // 期, #139
+    0x672A, // 未, #404
+    0x672C, // 本, #45
+    0x6771, // 東, #221
+    0x677F, // 板, #364
+    0x6797, // 林, #330
+    0x679C, // 果, #179
+    0x67E5, // 查, #283
+    0x683C, // 格, #157
+    0x6848, // 案, #392
+    0x689D, // 條, #406
+    0x696D, // 業, #103
+    0x6A02, // 樂, #116
+    0x6A13, // 樓, #411
+    0x6A19, // 標, #384
+    0x6A23, // 樣, #306
+    0x6A5F, // 機, #40
+    0x6AA2, // 檢, #359
+    0x6B0A, // 權, #228
+    0x6B21, // 次, #227
+    0x6B3E, // 款, #276
+    0x6B4C, // 歌, #496
+    0x6B61, // 歡, #427
+    0x6B63, // 正, #206
+    0x6B64, // 此, #247
+    0x6BCF, // 每, #391
+    0x6BD4, // 比, #257
+    0x6C11, // 民, #230
+    0x6C23, // 氣, #200
+    0x6C34, // 水, #140
+    0x6C42, // 求, #501
+    0x6C92, // 沒, #162
+    0x6CD5, // 法, #89
+    0x6D3B, // 活, #124
+    0x6D41, // 流, #315
+    0x6D77, // 海, #258
+    0x6D88, // 消, #342
+    0x6E05, // 清, #329
+    0x6E2F, // 港, #293
+    0x6F14, // 演, #491
+    0x7063, // 灣, #195
+    0x70BA, // 為, #39
+    0x7121, // 無, #107
+    0x7136, // 然, #215
+    0x7167, // 照, #376
+    0x71B1, // 熱, #245
+    0x7247, // 片, #90
+    0x7248, // 版, #112
+    0x724C, // 牌, #467
+    0x7269, // 物, #110
+    0x7279, // 特, #183
+    0x738B, // 王, #287
+    0x73A9, // 玩, #354
+    0x73FE, // 現, #143
+    0x7403, // 球, #350
+    0x7406, // 理, #105
+    0x751F, // 生, #24
+    0x7522, // 產, #201
+    0x7528, // 用, #17
+    0x7531, // 由, #288
+    0x7537, // 男, #298
+    0x754C, // 界, #399
+    0x7559, // 留, #218
+    0x756B, // 畫, #412
+    0x7576, // 當, #185
+    0x767B, // 登, #138
+    0x767C, // 發, #28
+    0x767D, // 白, #377
+    0x767E, // 百, #393
+    0x7684, // 的, #1
+    0x76EE, // 目, #271
+    0x76F4, // 直, #379
+    0x76F8, // 相, #98
+    0x770B, // 看, #52
+    0x771F, // 真, #180
+    0x773C, // 眼, #433
+    0x77E5, // 知, #170
+    0x78BC, // 碼, #481
+    0x793A, // 示, #353
+    0x793E, // 社, #333
+    0x795E, // 神, #304
+    0x7968, // 票, #477
+    0x798F, // 福, #494
+    0x79C1, // 私, #507
+    0x79D1, // 科, #280
+    0x7A0B, // 程, #272
+    0x7A2E, // 種, #337
+    0x7A4D, // 積, #385
+    0x7A7A, // 空, #324
+    0x7ACB, // 立, #286
+    0x7AD9, // 站, #117
+    0x7AE0, // 章, #141
+    0x7B2C, // 第, #135
+    0x7B49, // 等, #240
+    0x7BA1, // 管, #340
+    0x7BC0, // 節, #431
+    0x7BC7, // 篇, #479
+    0x7C21, // 簡, #499
+    0x7CBE, // 精, #213
+    0x7CFB, // 系, #212
+    0x7D04, // 約, #462
+    0x7D05, // 紅, #452
+    0x7D1A, // 級, #267
+    0x7D30, // 細, #486
+    0x7D44, // 組, #335
+    0x7D50, // 結, #243
+    0x7D66, // 給, #355
+    0x7D71, // 統, #375
+    0x7D93, // 經, #111
+    0x7DB2, // 網, #32
+    0x7DDA, // 線, #151
+    0x7E23, // 縣, #439
+    0x7E3D, // 總, #370
+    0x7F8E, // 美, #41
+    0x7FA9, // 義, #504
+    0x8001, // 老, #290
+    0x8003, // 考, #428
+    0x8005, // 者, #92
+    0x800C, // 而, #217
+    0x805E, // 聞, #181
+    0x806F, // 聯, #310
+    0x8072, // 聲, #413
+    0x80A1, // 股, #390
+    0x80B2, // 育, #453
+    0x80FD, // 能, #71
+    0x8166, // 腦, #408
+    0x81EA, // 自, #61
+    0x81F3, // 至, #344
+    0x8207, // 與, #84
+    0x8209, // 舉, #463
+    0x8272, // 色, #192
+    0x82B1, // 花, #255
+    0x82F1, // 英, #348
+    0x83EF, // 華, #196
+    0x842C, // 萬, #316
+    0x843D, // 落, #308
+    0x8457, // 著, #233
+    0x85A6, // 薦, #401
+    0x85CF, // 藏, #503
+    0x85DD, // 藝, #488
+    0x8655, // 處, #419
+    0x865F, // 號, #191
+    0x884C, // 行, #47
+    0x8853, // 術, #395
+    0x8868, // 表, #77
+    0x88AB, // 被, #291
+    0x88DD, // 裝, #256
+    0x88E1, // 裡, #369
+    0x88FD, // 製, #510
+    0x897F, // 西, #300
+    0x8981, // 要, #36
+    0x898B, // 見, #307
+    0x8996, // 視, #204
+    0x89BA, // 覺, #450
+    0x89BD, // 覽, #387
+    0x89C0, // 觀, #365
+    0x89E3, // 解, #323
+    0x8A00, // 言, #169
+    0x8A02, // 訂, #423
+    0x8A08, // 計, #225
+    0x8A0A, // 訊, #156
+    0x8A0E, // 討, #373
+    0x8A18, // 記, #222
+    0x8A2D, // 設, #174
+    0x8A3B, // 註, #356
+    0x8A55, // 評, #246
+    0x8A66, // 試, #448
+    0x8A71, // 話, #229
+    0x8A72, // 該, #446
+    0x8A8D, // 認, #464
+    0x8A9E, // 語, #371
+    0x8AAA, // 說, #91
+    0x8ABF, // 調, #509
+    0x8ACB, // 請, #119
+    0x8AD6, // 論, #114
+    0x8B1D, // 謝, #389
+    0x8B49, // 證, #429
+    0x8B58, // 識, #416
+    0x8B70, // 議, #485
+    0x8B77, // 護, #475
+    0x8B80, // 讀, #386
+    0x8B8A, // 變, #388
+    0x8B93, // 讓, #336
+    0x8CA8, // 貨, #313
+    0x8CB7, // 買, #260
+    0x8CBB, // 費, #203
+    0x8CC7, // 資, #62
+    0x8CE3, // 賣, #294
+    0x8CEA, // 質, #457
+    0x8CFC, // 購, #189
+    0x8D77, // 起, #214
+    0x8D85, // 超, #296
+    0x8DDF, // 跟, #489
+    0x8DEF, // 路, #137
+    0x8EAB, // 身, #197
+    0x8ECA, // 車, #76
+    0x8F09, // 載, #301
+    0x8F49, // 轉, #282
+    0x8FD1, // 近, #414
+    0x9001, // 送, #363
+    0x9019, // 這, #42
+    0x901A, // 通, #207
+    0x901F, // 速, #495
+    0x9020, // 造, #455
+    0x9023, // 連, #285
+    0x9032, // 進, #231
+    0x904A, // 遊, #132
+    0x904B, // 運, #219
+    0x904E, // 過, #101
+    0x9053, // 道, #146
+    0x9054, // 達, #417
+    0x9078, // 選, #182
+    0x9084, // 還, #154
+    0x908A, // 邊, #487
+    0x90A3, // 那, #269
+    0x90E8, // 部, #78
+    0x90FD, // 都, #104
+    0x914D, // 配, #421
+    0x9152, // 酒, #512
+    0x91AB, // 醫, #358
+    0x91CD, // 重, #224
+    0x91CF, // 量, #319
+    0x91D1, // 金, #115
+    0x9304, // 錄, #302
+    0x9577, // 長, #172
+    0x9580, // 門, #193
+    0x958B, // 開, #72
+    0x9593, // 間, #80
+    0x95B1, // 閱, #405
+    0x95DC, // 關, #74
+    0x963F, // 阿, #460
+    0x9650, // 限, #265
+    0x9662, // 院, #474
+    0x9664, // 除, #478
+    0x969B, // 際, #459
+    0x96C6, // 集, #347
+    0x96E2, // 離, #442
+    0x96FB, // 電, #33
+    0x9700, // 需, #445
+    0x975E, // 非, #451
+    0x9762, // 面, #129
+    0x97F3, // 音, #194
+    0x9801, // 頁, #83
+    0x982D, // 頭, #238
+    0x984C, // 題, #122
+    0x985E, // 類, #163
+    0x98A8, // 風, #266
+    0x98DF, // 食, #208
+    0x9910, // 餐, #469
+    0x9928, // 館, #424
+    0x9996, // 首, #166
+    0x9999, // 香, #263
+    0x99AC, // 馬, #317
+    0x9A57, // 驗, #492
+    0x9AD4, // 體, #100
+    0x9AD8, // 高, #88
+    0x9EBC, // 麼, #241
+    0x9EC3, // 黃, #480
+    0x9ED1, // 黑, #490
+    0x9EDE, // 點, #69
+    0x9F8D, // 龍, #505
+};
+// the percentage of the sample covered by the above characters
+static const float frequent_zhTW_coverage=0.704841200026877;
+
+// The 512 most frequently occurring characters for the ja language in a sample of the Internet.
+// Ordered by codepoint, comment shows character and ranking by frequency
+const uint16_t frequent_ja[] = {
+    0x3005, // 々, #352
+    0x3041, // ぁ, #486
+    0x3042, // あ, #50
+    0x3044, // い, #2
+    0x3046, // う, #33
+    0x3048, // え, #83
+    0x304A, // お, #37
+    0x304B, // か, #21
+    0x304C, // が, #17
+    0x304D, // き, #51
+    0x304E, // ぎ, #324
+    0x304F, // く, #38
+    0x3050, // ぐ, #334
+    0x3051, // け, #60
+    0x3052, // げ, #296
+    0x3053, // こ, #34
+    0x3054, // ご, #100
+    0x3055, // さ, #31
+    0x3056, // ざ, #378
+    0x3057, // し, #4
+    0x3058, // じ, #121
+    0x3059, // す, #12
+    0x305A, // ず, #215
+    0x305B, // せ, #86
+    0x305D, // そ, #68
+    0x305F, // た, #11
+    0x3060, // だ, #42
+    0x3061, // ち, #67
+    0x3063, // っ, #23
+    0x3064, // つ, #73
+    0x3066, // て, #7
+    0x3067, // で, #6
+    0x3068, // と, #14
+    0x3069, // ど, #75
+    0x306A, // な, #8
+    0x306B, // に, #5
+    0x306D, // ね, #123
+    0x306E, // の, #1
+    0x306F, // は, #16
+    0x3070, // ば, #150
+    0x3071, // ぱ, #259
+    0x3072, // ひ, #364
+    0x3073, // び, #266
+    0x3075, // ふ, #484
+    0x3076, // ぶ, #330
+    0x3078, // へ, #146
+    0x3079, // べ, #207
+    0x307B, // ほ, #254
+    0x307E, // ま, #18
+    0x307F, // み, #74
+    0x3080, // む, #285
+    0x3081, // め, #78
+    0x3082, // も, #32
+    0x3083, // ゃ, #111
+    0x3084, // や, #85
+    0x3086, // ゆ, #392
+    0x3087, // ょ, #224
+    0x3088, // よ, #63
+    0x3089, // ら, #29
+    0x308A, // り, #28
+    0x308B, // る, #9
+    0x308C, // れ, #35
+    0x308D, // ろ, #127
+    0x308F, // わ, #88
+    0x3092, // を, #19
+    0x3093, // ん, #22
+    0x30A1, // ァ, #193
+    0x30A2, // ア, #27
+    0x30A3, // ィ, #70
+    0x30A4, // イ, #15
+    0x30A6, // ウ, #89
+    0x30A7, // ェ, #134
+    0x30A8, // エ, #81
+    0x30A9, // ォ, #225
+    0x30AA, // オ, #76
+    0x30AB, // カ, #52
+    0x30AC, // ガ, #147
+    0x30AD, // キ, #66
+    0x30AE, // ギ, #246
+    0x30AF, // ク, #25
+    0x30B0, // グ, #39
+    0x30B1, // ケ, #137
+    0x30B2, // ゲ, #200
+    0x30B3, // コ, #46
+    0x30B4, // ゴ, #183
+    0x30B5, // サ, #64
+    0x30B6, // ザ, #221
+    0x30B7, // シ, #48
+    0x30B8, // ジ, #55
+    0x30B9, // ス, #13
+    0x30BA, // ズ, #103
+    0x30BB, // セ, #109
+    0x30BC, // ゼ, #499
+    0x30BD, // ソ, #175
+    0x30BF, // タ, #45
+    0x30C0, // ダ, #104
+    0x30C1, // チ, #71
+    0x30C3, // ッ, #20
+    0x30C4, // ツ, #119
+    0x30C6, // テ, #59
+    0x30C7, // デ, #82
+    0x30C8, // ト, #10
+    0x30C9, // ド, #44
+    0x30CA, // ナ, #102
+    0x30CB, // ニ, #72
+    0x30CD, // ネ, #117
+    0x30CE, // ノ, #192
+    0x30CF, // ハ, #164
+    0x30D0, // バ, #62
+    0x30D1, // パ, #90
+    0x30D2, // ヒ, #398
+    0x30D3, // ビ, #77
+    0x30D4, // ピ, #135
+    0x30D5, // フ, #47
+    0x30D6, // ブ, #56
+    0x30D7, // プ, #43
+    0x30D8, // ヘ, #268
+    0x30D9, // ベ, #157
+    0x30DA, // ペ, #125
+    0x30DB, // ホ, #155
+    0x30DC, // ボ, #168
+    0x30DD, // ポ, #114
+    0x30DE, // マ, #57
+    0x30DF, // ミ, #97
+    0x30E0, // ム, #69
+    0x30E1, // メ, #53
+    0x30E2, // モ, #142
+    0x30E3, // ャ, #93
+    0x30E4, // ヤ, #258
+    0x30E5, // ュ, #79
+    0x30E6, // ユ, #405
+    0x30E7, // ョ, #98
+    0x30E9, // ラ, #26
+    0x30EA, // リ, #30
+    0x30EB, // ル, #24
+    0x30EC, // レ, #41
+    0x30ED, // ロ, #40
+    0x30EF, // ワ, #144
+    0x30F3, // ン, #3
+    0x30F4, // ヴ, #483
+    0x30FD, // ヽ, #501
+    0x4E00, // 一, #84
+    0x4E07, // 万, #337
+    0x4E09, // 三, #323
+    0x4E0A, // 上, #133
+    0x4E0B, // 下, #180
+    0x4E0D, // 不, #277
+    0x4E16, // 世, #385
+    0x4E2D, // 中, #87
+    0x4E3B, // 主, #432
+    0x4E88, // 予, #326
+    0x4E8B, // 事, #95
+    0x4E8C, // 二, #394
+    0x4E95, // 井, #468
+    0x4EA4, // 交, #410
+    0x4EAC, // 京, #260
+    0x4EBA, // 人, #61
+    0x4ECA, // 今, #184
+    0x4ECB, // 介, #358
+    0x4ED5, // 仕, #391
+    0x4ED6, // 他, #256
+    0x4ED8, // 付, #243
+    0x4EE3, // 代, #280
+    0x4EE5, // 以, #216
+    0x4EF6, // 件, #190
+    0x4F1A, // 会, #105
+    0x4F4D, // 位, #177
+    0x4F4F, // 住, #376
+    0x4F53, // 体, #223
+    0x4F55, // 何, #294
+    0x4F5C, // 作, #154
+    0x4F7F, // 使, #233
+    0x4F9B, // 供, #503
+    0x4FA1, // 価, #217
+    0x4FBF, // 便, #511
+    0x4FDD, // 保, #279
+    0x4FE1, // 信, #271
+    0x500B, // 個, #415
+    0x50CF, // 像, #178
+    0x512A, // 優, #403
+    0x5143, // 元, #384
+    0x5148, // 先, #311
+    0x5149, // 光, #488
+    0x5165, // 入, #115
+    0x5168, // 全, #173
+    0x516C, // 公, #287
+    0x5177, // 具, #447
+    0x5185, // 内, #169
+    0x5186, // 円, #131
+    0x5199, // 写, #275
+    0x51FA, // 出, #110
+    0x5206, // 分, #130
+    0x5207, // 切, #401
+    0x521D, // 初, #319
+    0x5225, // 別, #290
+    0x5229, // 利, #226
+    0x5236, // 制, #507
+    0x524D, // 前, #124
+    0x529B, // 力, #272
+    0x52A0, // 加, #249
+    0x52D5, // 動, #120
+    0x52D9, // 務, #421
+    0x52DF, // 募, #476
+    0x5316, // 化, #308
+    0x5317, // 北, #341
+    0x533A, // 区, #348
+    0x539F, // 原, #321
+    0x53C2, // 参, #452
+    0x53CB, // 友, #451
+    0x53D6, // 取, #237
+    0x53D7, // 受, #354
+    0x53E3, // 口, #289
+    0x53E4, // 古, #339
+    0x53EF, // 可, #298
+    0x53F0, // 台, #439
+    0x53F7, // 号, #361
+    0x5408, // 合, #118
+    0x540C, // 同, #263
+    0x540D, // 名, #65
+    0x5411, // 向, #434
+    0x544A, // 告, #386
+    0x5468, // 周, #393
+    0x5473, // 味, #299
+    0x548C, // 和, #350
+    0x54C1, // 品, #96
+    0x54E1, // 員, #293
+    0x5546, // 商, #198
+    0x554F, // 問, #158
+    0x55B6, // 営, #438
+    0x5668, // 器, #366
+    0x56DE, // 回, #143
+    0x56F3, // 図, #444
+    0x56FD, // 国, #153
+    0x5712, // 園, #435
+    0x571F, // 土, #239
+    0x5728, // 在, #351
+    0x5730, // 地, #163
+    0x578B, // 型, #430
+    0x5831, // 報, #112
+    0x5834, // 場, #139
+    0x58F2, // 売, #232
+    0x5909, // 変, #306
+    0x5916, // 外, #222
+    0x591A, // 多, #336
+    0x5927, // 大, #80
+    0x5929, // 天, #278
+    0x5973, // 女, #161
+    0x597D, // 好, #349
+    0x5A5A, // 婚, #479
+    0x5B50, // 子, #113
+    0x5B57, // 字, #492
+    0x5B66, // 学, #132
+    0x5B89, // 安, #295
+    0x5B9A, // 定, #145
+    0x5B9F, // 実, #220
+    0x5BA4, // 室, #482
+    0x5BAE, // 宮, #487
+    0x5BB6, // 家, #211
+    0x5BB9, // 容, #333
+    0x5BFE, // 対, #252
+    0x5C02, // 専, #474
+    0x5C0F, // 小, #212
+    0x5C11, // 少, #377
+    0x5C4B, // 屋, #284
+    0x5C71, // 山, #206
+    0x5CA1, // 岡, #429
+    0x5CF6, // 島, #297
+    0x5DDD, // 川, #253
+    0x5DE5, // 工, #374
+    0x5E02, // 市, #159
+    0x5E2F, // 帯, #416
+    0x5E38, // 常, #437
+    0x5E73, // 平, #390
+    0x5E74, // 年, #54
+    0x5E83, // 広, #367
+    0x5E97, // 店, #149
+    0x5EA6, // 度, #269
+    0x5EAB, // 庫, #380
+    0x5F0F, // 式, #265
+    0x5F15, // 引, #345
+    0x5F37, // 強, #446
+    0x5F53, // 当, #240
+    0x5F62, // 形, #502
+    0x5F8C, // 後, #230
+    0x5F97, // 得, #490
+    0x5FC3, // 心, #307
+    0x5FC5, // 必, #422
+    0x5FDC, // 応, #356
+    0x601D, // 思, #189
+    0x6027, // 性, #201
+    0x6075, // 恵, #400
+    0x60C5, // 情, #140
+    0x60F3, // 想, #477
+    0x610F, // 意, #305
+    0x611B, // 愛, #273
+    0x611F, // 感, #257
+    0x6210, // 成, #262
+    0x6226, // 戦, #365
+    0x6240, // 所, #236
+    0x624B, // 手, #160
+    0x6295, // 投, #129
+    0x6301, // 持, #355
+    0x6307, // 指, #425
+    0x63A2, // 探, #369
+    0x63B2, // 掲, #399
+    0x643A, // 携, #459
+    0x652F, // 支, #512
+    0x653E, // 放, #469
+    0x6559, // 教, #270
+    0x6570, // 数, #181
+    0x6587, // 文, #202
+    0x6599, // 料, #106
+    0x65B0, // 新, #99
+    0x65B9, // 方, #126
+    0x65C5, // 旅, #445
+    0x65E5, // 日, #36
+    0x660E, // 明, #300
+    0x6620, // 映, #418
+    0x6642, // 時, #107
+    0x66F4, // 更, #359
+    0x66F8, // 書, #174
+    0x6700, // 最, #152
+    0x6708, // 月, #49
+    0x6709, // 有, #302
+    0x671F, // 期, #332
+    0x6728, // 木, #203
+    0x672C, // 本, #92
+    0x6750, // 材, #489
+    0x6751, // 村, #466
+    0x6765, // 来, #267
+    0x6771, // 東, #191
+    0x677F, // 板, #411
+    0x679C, // 果, #441
+    0x6821, // 校, #327
+    0x682A, // 株, #412
+    0x683C, // 格, #228
+    0x691C, // 検, #179
+    0x696D, // 業, #166
+    0x697D, // 楽, #172
+    0x69D8, // 様, #255
+    0x6A5F, // 機, #235
+    0x6B21, // 次, #318
+    0x6B62, // 止, #475
+    0x6B63, // 正, #312
+    0x6C17, // 気, #116
+    0x6C34, // 水, #165
+    0x6C42, // 求, #465
+    0x6C7A, // 決, #370
+    0x6CBB, // 治, #505
+    0x6CC1, // 況, #462
+    0x6CD5, // 法, #227
+    0x6CE8, // 注, #372
+    0x6D3B, // 活, #303
+    0x6D41, // 流, #480
+    0x6D77, // 海, #274
+    0x6E08, // 済, #417
+    0x6F14, // 演, #504
+    0x706B, // 火, #264
+    0x70B9, // 点, #331
+    0x7121, // 無, #58
+    0x7248, // 版, #409
+    0x7269, // 物, #170
+    0x7279, // 特, #242
+    0x72B6, // 状, #458
+    0x73FE, // 現, #322
+    0x7406, // 理, #162
+    0x751F, // 生, #122
+    0x7523, // 産, #320
+    0x7528, // 用, #94
+    0x7530, // 田, #195
+    0x7537, // 男, #373
+    0x753A, // 町, #314
+    0x753B, // 画, #91
+    0x754C, // 界, #436
+    0x756A, // 番, #261
+    0x75C5, // 病, #428
+    0x767A, // 発, #194
+    0x767B, // 登, #231
+    0x767D, // 白, #419
+    0x7684, // 的, #251
+    0x76EE, // 目, #197
+    0x76F4, // 直, #497
+    0x76F8, // 相, #286
+    0x770C, // 県, #199
+    0x771F, // 真, #219
+    0x7740, // 着, #283
+    0x77E5, // 知, #185
+    0x77F3, // 石, #500
+    0x78BA, // 確, #383
+    0x793A, // 示, #241
+    0x793E, // 社, #167
+    0x795E, // 神, #315
+    0x798F, // 福, #423
+    0x79C1, // 私, #347
+    0x79D1, // 科, #420
+    0x7A0E, // 税, #368
+    0x7A2E, // 種, #455
+    0x7A3F, // 稿, #148
+    0x7A7A, // 空, #427
+    0x7ACB, // 立, #309
+    0x7B11, // 笑, #454
+    0x7B2C, // 第, #317
+    0x7B49, // 等, #457
+    0x7B54, // 答, #426
+    0x7BA1, // 管, #481
+    0x7CFB, // 系, #408
+    0x7D04, // 約, #276
+    0x7D20, // 素, #407
+    0x7D22, // 索, #214
+    0x7D30, // 細, #381
+    0x7D39, // 紹, #471
+    0x7D42, // 終, #456
+    0x7D44, // 組, #424
+    0x7D4C, // 経, #360
+    0x7D50, // 結, #291
+    0x7D9A, // 続, #357
+    0x7DCF, // 総, #467
+    0x7DDA, // 線, #338
+    0x7DE8, // 編, #453
+    0x7F8E, // 美, #204
+    0x8003, // 考, #387
+    0x8005, // 者, #151
+    0x805E, // 聞, #463
+    0x8077, // 職, #363
+    0x80B2, // 育, #433
+    0x80FD, // 能, #250
+    0x8179, // 腹, #396
+    0x81EA, // 自, #156
+    0x826F, // 良, #329
+    0x8272, // 色, #402
+    0x82B1, // 花, #440
+    0x82B8, // 芸, #413
+    0x82F1, // 英, #485
+    0x8449, // 葉, #472
+    0x884C, // 行, #128
+    0x8853, // 術, #460
+    0x8868, // 表, #209
+    0x88FD, // 製, #431
+    0x897F, // 西, #406
+    0x8981, // 要, #313
+    0x898B, // 見, #101
+    0x898F, // 規, #375
+    0x89A7, // 覧, #171
+    0x89E3, // 解, #388
+    0x8A00, // 言, #210
+    0x8A08, // 計, #343
+    0x8A18, // 記, #136
+    0x8A2D, // 設, #292
+    0x8A71, // 話, #213
+    0x8A73, // 詳, #371
+    0x8A8D, // 認, #404
+    0x8A9E, // 語, #234
+    0x8AAC, // 説, #494
+    0x8AAD, // 読, #301
+    0x8ABF, // 調, #443
+    0x8AC7, // 談, #448
+    0x8B77, // 護, #509
+    0x8C37, // 谷, #506
+    0x8CA9, // 販, #362
+    0x8CB7, // 買, #346
+    0x8CC7, // 資, #473
+    0x8CEA, // 質, #281
+    0x8CFC, // 購, #495
+    0x8EAB, // 身, #470
+    0x8ECA, // 車, #205
+    0x8EE2, // 転, #335
+    0x8F09, // 載, #342
+    0x8FBC, // 込, #229
+    0x8FD1, // 近, #304
+    0x8FD4, // 返, #461
+    0x8FFD, // 追, #379
+    0x9001, // 送, #186
+    0x901A, // 通, #182
+    0x901F, // 速, #340
+    0x9023, // 連, #244
+    0x904B, // 運, #382
+    0x904E, // 過, #498
+    0x9053, // 道, #282
+    0x9054, // 達, #450
+    0x9055, // 違, #414
+    0x9078, // 選, #288
+    0x90E8, // 部, #208
+    0x90FD, // 都, #344
+    0x914D, // 配, #389
+    0x91CD, // 重, #478
+    0x91CE, // 野, #245
+    0x91D1, // 金, #138
+    0x9332, // 録, #238
+    0x9577, // 長, #247
+    0x9580, // 門, #508
+    0x958B, // 開, #248
+    0x9593, // 間, #141
+    0x95A2, // 関, #188
+    0x962A, // 阪, #496
+    0x9650, // 限, #395
+    0x9662, // 院, #449
+    0x9664, // 除, #510
+    0x969B, // 際, #493
+    0x96C6, // 集, #196
+    0x96D1, // 雑, #442
+    0x96FB, // 電, #187
+    0x9762, // 面, #328
+    0x97F3, // 音, #325
+    0x984C, // 題, #310
+    0x985E, // 類, #491
+    0x98A8, // 風, #353
+    0x98DF, // 食, #218
+    0x9928, // 館, #464
+    0x99C5, // 駅, #316
+    0x9A13, // 験, #397
+    0x9AD8, // 高, #176
+    0xFF57, // w, #108
+};
+// the percentage of the sample covered by the above characters
+static const float frequent_ja_coverage=0.880569589120162;
+
+// The 512 most frequently occurring characters for the ko language in a sample of the Internet.
+// Ordered by codepoint, comment shows character and ranking by frequency
+const uint16_t frequent_ko[] = {
+    0x314B, // ㅋ, #148
+    0x314E, // ㅎ, #390
+    0x3160, // ㅠ, #354
+    0x318D, // ㆍ, #439
+    0xAC00, // 가, #6
+    0xAC01, // 각, #231
+    0xAC04, // 간, #106
+    0xAC08, // 갈, #362
+    0xAC10, // 감, #122
+    0xAC11, // 갑, #493
+    0xAC15, // 강, #155
+    0xAC19, // 같, #264
+    0xAC1C, // 개, #87
+    0xAC1D, // 객, #198
+    0xAC24, // 갤, #457
+    0xAC70, // 거, #91
+    0xAC74, // 건, #161
+    0xAC78, // 걸, #338
+    0xAC80, // 검, #184
+    0xAC83, // 것, #116
+    0xAC8C, // 게, #36
+    0xACA0, // 겠, #233
+    0xACA8, // 겨, #341
+    0xACA9, // 격, #245
+    0xACAC, // 견, #413
+    0xACB0, // 결, #202
+    0xACBD, // 경, #62
+    0xACC4, // 계, #142
+    0xACE0, // 고, #12
+    0xACE1, // 곡, #444
+    0xACE8, // 골, #379
+    0xACF3, // 곳, #388
+    0xACF5, // 공, #59
+    0xACFC, // 과, #69
+    0xAD00, // 관, #95
+    0xAD11, // 광, #235
+    0xAD50, // 교, #128
+    0xAD6C, // 구, #52
+    0xAD6D, // 국, #85
+    0xAD70, // 군, #293
+    0xAD74, // 굴, #487
+    0xAD81, // 궁, #441
+    0xAD8C, // 권, #192
+    0xADC0, // 귀, #386
+    0xADDC, // 규, #367
+    0xADF8, // 그, #30
+    0xADF9, // 극, #424
+    0xADFC, // 근, #241
+    0xAE00, // 글, #61
+    0xAE08, // 금, #138
+    0xAE09, // 급, #269
+    0xAE30, // 기, #3
+    0xAE34, // 긴, #465
+    0xAE38, // 길, #297
+    0xAE40, // 김, #205
+    0xAE4C, // 까, #171
+    0xAED8, // 께, #273
+    0xAF43, // 꽃, #475
+    0xB05D, // 끝, #505
+    0xB07C, // 끼, #490
+    0xB098, // 나, #39
+    0xB09C, // 난, #274
+    0xB0A0, // 날, #292
+    0xB0A8, // 남, #139
+    0xB0B4, // 내, #56
+    0xB108, // 너, #272
+    0xB110, // 널, #476
+    0xB118, // 넘, #492
+    0xB124, // 네, #100
+    0xB137, // 넷, #329
+    0xB140, // 녀, #288
+    0xB144, // 년, #151
+    0xB178, // 노, #149
+    0xB17C, // 논, #491
+    0xB180, // 놀, #464
+    0xB18D, // 농, #442
+    0xB204, // 누, #319
+    0xB208, // 눈, #383
+    0xB274, // 뉴, #173
+    0xB290, // 느, #368
+    0xB294, // 는, #5
+    0xB298, // 늘, #322
+    0xB2A5, // 능, #190
+    0xB2C8, // 니, #16
+    0xB2D8, // 님, #153
+    0xB2E4, // 다, #2
+    0xB2E8, // 단, #134
+    0xB2EB, // 닫, #195
+    0xB2EC, // 달, #243
+    0xB2F4, // 담, #254
+    0xB2F5, // 답, #287
+    0xB2F9, // 당, #159
+    0xB300, // 대, #33
+    0xB313, // 댓, #303
+    0xB354, // 더, #140
+    0xB358, // 던, #252
+    0xB367, // 덧, #463
+    0xB370, // 데, #104
+    0xB378, // 델, #429
+    0xB3C4, // 도, #25
+    0xB3C5, // 독, #301
+    0xB3CC, // 돌, #309
+    0xB3D9, // 동, #58
+    0xB418, // 되, #82
+    0xB41C, // 된, #189
+    0xB420, // 될, #408
+    0xB429, // 됩, #332
+    0xB450, // 두, #199
+    0xB4A4, // 뒤, #496
+    0xB4DC, // 드, #40
+    0xB4E0, // 든, #283
+    0xB4E4, // 들, #54
+    0xB4EF, // 듯, #478
+    0xB4F1, // 등, #90
+    0xB514, // 디, #133
+    0xB529, // 딩, #462
+    0xB530, // 따, #333
+    0xB54C, // 때, #240
+    0xB610, // 또, #313
+    0xB77C, // 라, #42
+    0xB77D, // 락, #355
+    0xB780, // 란, #290
+    0xB78C, // 람, #246
+    0xB78D, // 랍, #420
+    0xB791, // 랑, #270
+    0xB798, // 래, #174
+    0xB799, // 랙, #381
+    0xB79C, // 랜, #357
+    0xB7A8, // 램, #359
+    0xB7A9, // 랩, #402
+    0xB7C9, // 량, #346
+    0xB7EC, // 러, #130
+    0xB7F0, // 런, #312
+    0xB7FC, // 럼, #327
+    0xB7FD, // 럽, #447
+    0xB807, // 렇, #412
+    0xB808, // 레, #114
+    0xB80C, // 렌, #395
+    0xB824, // 려, #158
+    0xB825, // 력, #194
+    0xB828, // 련, #326
+    0xB839, // 령, #389
+    0xB85C, // 로, #4
+    0xB85D, // 록, #84
+    0xB860, // 론, #366
+    0xB8CC, // 료, #154
+    0xB8E8, // 루, #236
+    0xB958, // 류, #265
+    0xB974, // 르, #212
+    0xB978, // 른, #250
+    0xB97C, // 를, #35
+    0xB984, // 름, #276
+    0xB9AC, // 리, #19
+    0xB9AD, // 릭, #394
+    0xB9B0, // 린, #259
+    0xB9B4, // 릴, #485
+    0xB9BC, // 림, #305
+    0xB9BD, // 립, #217
+    0xB9C1, // 링, #351
+    0xB9C8, // 마, #67
+    0xB9C9, // 막, #310
+    0xB9CC, // 만, #65
+    0xB9CE, // 많, #257
+    0xB9D0, // 말, #188
+    0xB9DB, // 맛, #397
+    0xB9DD, // 망, #370
+    0xB9DE, // 맞, #399
+    0xB9E4, // 매, #125
+    0xB9E8, // 맨, #422
+    0xBA38, // 머, #311
+    0xBA39, // 먹, #377
+    0xBA3C, // 먼, #469
+    0xBA54, // 메, #147
+    0xBA70, // 며, #191
+    0xBA74, // 면, #72
+    0xBA85, // 명, #131
+    0xBAA8, // 모, #73
+    0xBAA9, // 목, #157
+    0xBAB0, // 몰, #401
+    0xBAB8, // 몸, #437
+    0xBABB, // 못, #336
+    0xBB34, // 무, #80
+    0xBB38, // 문, #57
+    0xBB3C, // 물, #94
+    0xBBA4, // 뮤, #431
+    0xBBF8, // 미, #76
+    0xBBFC, // 민, #200
+    0xBC00, // 밀, #308
+    0xBC0F, // 및, #249
+    0xBC14, // 바, #89
+    0xBC15, // 박, #226
+    0xBC18, // 반, #175
+    0xBC1B, // 받, #248
+    0xBC1C, // 발, #164
+    0xBC29, // 방, #92
+    0xBC30, // 배, #162
+    0xBC31, // 백, #256
+    0xBC84, // 버, #111
+    0xBC88, // 번, #167
+    0xBC8C, // 벌, #423
+    0xBC94, // 범, #427
+    0xBC95, // 법, #207
+    0xBCA0, // 베, #281
+    0xBCA4, // 벤, #378
+    0xBCA8, // 벨, #387
+    0xBCC0, // 변, #253
+    0xBCC4, // 별, #262
+    0xBCD1, // 병, #340
+    0xBCF4, // 보, #20
+    0xBCF5, // 복, #204
+    0xBCF8, // 본, #182
+    0xBCFC, // 볼, #385
+    0xBD09, // 봉, #405
+    0xBD80, // 부, #46
+    0xBD81, // 북, #261
+    0xBD84, // 분, #105
+    0xBD88, // 불, #225
+    0xBDF0, // 뷰, #350
+    0xBE0C, // 브, #214
+    0xBE14, // 블, #99
+    0xBE44, // 비, #55
+    0xBE4C, // 빌, #510
+    0xBE60, // 빠, #398
+    0xC0AC, // 사, #14
+    0xC0AD, // 삭, #342
+    0xC0B0, // 산, #121
+    0xC0B4, // 살, #279
+    0xC0BC, // 삼, #348
+    0xC0C1, // 상, #41
+    0xC0C8, // 새, #282
+    0xC0C9, // 색, #181
+    0xC0DD, // 생, #109
+    0xC11C, // 서, #21
+    0xC11D, // 석, #234
+    0xC120, // 선, #107
+    0xC124, // 설, #170
+    0xC131, // 성, #50
+    0xC138, // 세, #60
+    0xC139, // 섹, #456
+    0xC13C, // 센, #267
+    0xC154, // 셔, #455
+    0xC158, // 션, #237
+    0xC15C, // 셜, #448
+    0xC168, // 셨, #421
+    0xC18C, // 소, #51
+    0xC18D, // 속, #219
+    0xC190, // 손, #323
+    0xC1A1, // 송, #203
+    0xC1C4, // 쇄, #501
+    0xC1FC, // 쇼, #364
+    0xC218, // 수, #27
+    0xC219, // 숙, #467
+    0xC21C, // 순, #258
+    0xC220, // 술, #302
+    0xC26C, // 쉬, #511
+    0xC288, // 슈, #384
+    0xC2A4, // 스, #11
+    0xC2AC, // 슬, #438
+    0xC2B4, // 슴, #504
+    0xC2B5, // 습, #77
+    0xC2B9, // 승, #299
+    0xC2DC, // 시, #13
+    0xC2DD, // 식, #137
+    0xC2E0, // 신, #47
+    0xC2E4, // 실, #132
+    0xC2EC, // 심, #196
+    0xC2ED, // 십, #482
+    0xC2F6, // 싶, #352
+    0xC2F8, // 싸, #419
+    0xC4F0, // 쓰, #278
+    0xC528, // 씨, #360
+    0xC544, // 아, #23
+    0xC545, // 악, #296
+    0xC548, // 안, #71
+    0xC54A, // 않, #209
+    0xC54C, // 알, #222
+    0xC554, // 암, #460
+    0xC558, // 았, #349
+    0xC559, // 앙, #473
+    0xC55E, // 앞, #434
+    0xC560, // 애, #271
+    0xC561, // 액, #415
+    0xC571, // 앱, #477
+    0xC57C, // 야, #124
+    0xC57D, // 약, #229
+    0xC591, // 양, #177
+    0xC5B4, // 어, #24
+    0xC5B5, // 억, #407
+    0xC5B8, // 언, #294
+    0xC5BC, // 얼, #356
+    0xC5C4, // 엄, #426
+    0xC5C5, // 업, #118
+    0xC5C6, // 없, #178
+    0xC5C8, // 었, #165
+    0xC5D0, // 에, #9
+    0xC5D4, // 엔, #375
+    0xC5D8, // 엘, #506
+    0xC5EC, // 여, #66
+    0xC5ED, // 역, #186
+    0xC5EE, // 엮, #488
+    0xC5F0, // 연, #96
+    0xC5F4, // 열, #266
+    0xC5FC, // 염, #449
+    0xC600, // 였, #374
+    0xC601, // 영, #83
+    0xC608, // 예, #168
+    0xC624, // 오, #75
+    0xC628, // 온, #300
+    0xC62C, // 올, #306
+    0xC640, // 와, #119
+    0xC644, // 완, #361
+    0xC654, // 왔, #489
+    0xC655, // 왕, #418
+    0xC678, // 외, #218
+    0xC694, // 요, #43
+    0xC695, // 욕, #479
+    0xC6A9, // 용, #48
+    0xC6B0, // 우, #64
+    0xC6B1, // 욱, #503
+    0xC6B4, // 운, #108
+    0xC6B8, // 울, #223
+    0xC6C0, // 움, #317
+    0xC6C3, // 웃, #404
+    0xC6CC, // 워, #280
+    0xC6D0, // 원, #45
+    0xC6D4, // 월, #150
+    0xC6E8, // 웨, #446
+    0xC6F9, // 웹, #500
+    0xC704, // 위, #78
+    0xC720, // 유, #81
+    0xC721, // 육, #321
+    0xC724, // 윤, #416
+    0xC73C, // 으, #49
+    0xC740, // 은, #31
+    0xC744, // 을, #17
+    0xC74C, // 음, #112
+    0xC751, // 응, #461
+    0xC758, // 의, #8
+    0xC774, // 이, #1
+    0xC775, // 익, #403
+    0xC778, // 인, #18
+    0xC77C, // 일, #28
+    0xC784, // 임, #160
+    0xC785, // 입, #93
+    0xC788, // 있, #44
+    0xC790, // 자, #22
+    0xC791, // 작, #88
+    0xC798, // 잘, #347
+    0xC7A1, // 잡, #372
+    0xC7A5, // 장, #53
+    0xC7AC, // 재, #120
+    0xC7C1, // 쟁, #483
+    0xC800, // 저, #98
+    0xC801, // 적, #97
+    0xC804, // 전, #34
+    0xC808, // 절, #320
+    0xC810, // 점, #201
+    0xC811, // 접, #331
+    0xC815, // 정, #26
+    0xC81C, // 제, #29
+    0xC838, // 져, #414
+    0xC870, // 조, #86
+    0xC871, // 족, #373
+    0xC874, // 존, #432
+    0xC880, // 좀, #470
+    0xC885, // 종, #208
+    0xC88B, // 좋, #239
+    0xC8E0, // 죠, #451
+    0xC8FC, // 주, #38
+    0xC8FD, // 죽, #471
+    0xC900, // 준, #286
+    0xC904, // 줄, #392
+    0xC911, // 중, #103
+    0xC988, // 즈, #255
+    0xC98C, // 즌, #507
+    0xC990, // 즐, #371
+    0xC99D, // 증, #260
+    0xC9C0, // 지, #10
+    0xC9C1, // 직, #216
+    0xC9C4, // 진, #79
+    0xC9C8, // 질, #238
+    0xC9D1, // 집, #206
+    0xC9DC, // 짜, #411
+    0xC9F8, // 째, #494
+    0xCABD, // 쪽, #435
+    0xCC28, // 차, #146
+    0xCC29, // 착, #443
+    0xCC2C, // 찬, #481
+    0xCC30, // 찰, #440
+    0xCC38, // 참, #343
+    0xCC3D, // 창, #304
+    0xCC3E, // 찾, #335
+    0xCC44, // 채, #284
+    0xCC45, // 책, #298
+    0xCC98, // 처, #242
+    0xCC9C, // 천, #143
+    0xCCA0, // 철, #380
+    0xCCA8, // 첨, #452
+    0xCCAB, // 첫, #484
+    0xCCAD, // 청, #197
+    0xCCB4, // 체, #126
+    0xCCD0, // 쳐, #472
+    0xCD08, // 초, #220
+    0xCD1D, // 총, #406
+    0xCD5C, // 최, #179
+    0xCD94, // 추, #136
+    0xCD95, // 축, #337
+    0xCD9C, // 출, #166
+    0xCDA9, // 충, #369
+    0xCDE8, // 취, #210
+    0xCE20, // 츠, #215
+    0xCE21, // 측, #468
+    0xCE35, // 층, #512
+    0xCE58, // 치, #102
+    0xCE5C, // 친, #325
+    0xCE68, // 침, #263
+    0xCE74, // 카, #115
+    0xCE7C, // 칼, #466
+    0xCE90, // 캐, #454
+    0xCEE4, // 커, #285
+    0xCEE8, // 컨, #328
+    0xCEF4, // 컴, #417
+    0xCF00, // 케, #339
+    0xCF13, // 켓, #509
+    0xCF1C, // 켜, #508
+    0xCF54, // 코, #193
+    0xCF58, // 콘, #391
+    0xCFE0, // 쿠, #393
+    0xD035, // 퀵, #453
+    0xD06C, // 크, #101
+    0xD070, // 큰, #495
+    0xD074, // 클, #289
+    0xD0A4, // 키, #230
+    0xD0C0, // 타, #127
+    0xD0C1, // 탁, #314
+    0xD0C4, // 탄, #450
+    0xD0C8, // 탈, #436
+    0xD0DC, // 태, #221
+    0xD0DD, // 택, #275
+    0xD130, // 터, #70
+    0xD14C, // 테, #213
+    0xD150, // 텐, #324
+    0xD154, // 텔, #430
+    0xD15C, // 템, #382
+    0xD1A0, // 토, #145
+    0xD1B5, // 통, #156
+    0xD22C, // 투, #227
+    0xD2B8, // 트, #37
+    0xD2B9, // 특, #247
+    0xD2F0, // 티, #187
+    0xD305, // 팅, #410
+    0xD30C, // 파, #141
+    0xD310, // 판, #163
+    0xD314, // 팔, #499
+    0xD328, // 패, #307
+    0xD32C, // 팬, #459
+    0xD338, // 팸, #433
+    0xD37C, // 퍼, #344
+    0xD398, // 페, #172
+    0xD3B8, // 편, #251
+    0xD3C9, // 평, #291
+    0xD3EC, // 포, #68
+    0xD3ED, // 폭, #445
+    0xD3F0, // 폰, #318
+    0xD45C, // 표, #232
+    0xD480, // 풀, #497
+    0xD488, // 품, #113
+    0xD48D, // 풍, #425
+    0xD504, // 프, #110
+    0xD508, // 픈, #498
+    0xD50C, // 플, #211
+    0xD53C, // 피, #169
+    0xD544, // 필, #295
+    0xD551, // 핑, #376
+    0xD558, // 하, #7
+    0xD559, // 학, #129
+    0xD55C, // 한, #15
+    0xD560, // 할, #144
+    0xD568, // 함, #152
+    0xD569, // 합, #123
+    0xD56D, // 항, #268
+    0xD574, // 해, #32
+    0xD588, // 했, #180
+    0xD589, // 행, #135
+    0xD5A5, // 향, #345
+    0xD5C8, // 허, #396
+    0xD5D8, // 험, #316
+    0xD5E4, // 헤, #474
+    0xD604, // 현, #185
+    0xD611, // 협, #315
+    0xD615, // 형, #244
+    0xD61C, // 혜, #428
+    0xD638, // 호, #117
+    0xD63C, // 혼, #358
+    0xD648, // 홈, #330
+    0xD64D, // 홍, #363
+    0xD654, // 화, #63
+    0xD655, // 확, #183
+    0xD658, // 환, #224
+    0xD65C, // 활, #277
+    0xD669, // 황, #353
+    0xD68C, // 회, #74
+    0xD68D, // 획, #458
+    0xD69F, // 횟, #409
+    0xD6A8, // 효, #400
+    0xD6C4, // 후, #176
+    0xD6C8, // 훈, #486
+    0xD734, // 휴, #365
+    0xD754, // 흔, #480
+    0xD76C, // 희, #334
+    0xD788, // 히, #228
+    0xD798, // 힘, #502
+};
+// the percentage of the sample covered by the above characters
+static const float frequent_ko_coverage=0.948157021464184;
+
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index e02107f..1940fe7 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -58,7 +58,7 @@
     RESTORE_OUTPUT,
     OPEN_INPUT,
     CLOSE_INPUT,
-    SET_STREAM_OUTPUT,
+    INVALIDATE_STREAM,
     SET_VOICE_VOLUME,
     GET_RENDER_POSITION,
     GET_INPUT_FRAMES_LOST,
@@ -89,13 +89,12 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 track_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
                                 pid_t tid,
                                 int *sessionId,
-                                String8& name,
                                 int clientUid,
                                 status_t *status)
     {
@@ -106,9 +105,11 @@
         data.writeInt32(sampleRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
+        size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
         data.writeInt64(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
+        // haveSharedBuffer
         if (sharedBuffer != 0) {
             data.writeInt32(true);
             data.writeStrongBinder(sharedBuffer->asBinder());
@@ -117,7 +118,7 @@
         }
         data.writeInt32((int32_t) output);
         data.writeInt32((int32_t) tid);
-        int lSessionId = 0;
+        int lSessionId = AUDIO_SESSION_ALLOCATE;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
         }
@@ -127,6 +128,10 @@
         if (lStatus != NO_ERROR) {
             ALOGE("createTrack error: %s", strerror(-lStatus));
         } else {
+            frameCount = reply.readInt64();
+            if (pFrameCount != NULL) {
+                *pFrameCount = frameCount;
+            }
             lFlags = reply.readInt32();
             if (flags != NULL) {
                 *flags = lFlags;
@@ -135,11 +140,21 @@
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
-            name = reply.readString8();
             lStatus = reply.readInt32();
             track = interface_cast<IAudioTrack>(reply.readStrongBinder());
+            if (lStatus == NO_ERROR) {
+                if (track == 0) {
+                    ALOGE("createTrack should have returned an IAudioTrack");
+                    lStatus = UNKNOWN_ERROR;
+                }
+            } else {
+                if (track != 0) {
+                    ALOGE("createTrack returned an IAudioTrack but with status %d", lStatus);
+                    track.clear();
+                }
+            }
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
         return track;
@@ -150,7 +165,7 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 track_flags_t *flags,
                                 pid_t tid,
                                 int *sessionId,
@@ -163,11 +178,12 @@
         data.writeInt32(sampleRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
+        size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
         data.writeInt64(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
         data.writeInt32((int32_t) tid);
-        int lSessionId = 0;
+        int lSessionId = AUDIO_SESSION_ALLOCATE;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
         }
@@ -176,6 +192,10 @@
         if (lStatus != NO_ERROR) {
             ALOGE("openRecord error: %s", strerror(-lStatus));
         } else {
+            frameCount = reply.readInt64();
+            if (pFrameCount != NULL) {
+                *pFrameCount = frameCount;
+            }
             lFlags = reply.readInt32();
             if (flags != NULL) {
                 *flags = lFlags;
@@ -198,7 +218,7 @@
                 }
             }
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
         return record;
@@ -391,7 +411,7 @@
                                          const audio_offload_info_t *offloadInfo)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
         uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
         audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
         audio_channel_mask_t channelMask = pChannelMask != NULL ?
@@ -405,6 +425,7 @@
         data.writeInt32(channelMask);
         data.writeInt32(latency);
         data.writeInt32((int32_t) flags);
+        // hasOffloadInfo
         if (offloadInfo == NULL) {
             data.writeInt32(0);
         } else {
@@ -415,15 +436,25 @@
         audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
         ALOGV("openOutput() returned output, %d", output);
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) *pDevices = devices;
+        if (pDevices != NULL) {
+            *pDevices = devices;
+        }
         samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = samplingRate;
+        }
         format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) *pFormat = format;
+        if (pFormat != NULL) {
+            *pFormat = format;
+        }
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) {
+            *pChannelMask = channelMask;
+        }
         latency = reply.readInt32();
-        if (pLatencyMs != NULL) *pLatencyMs = latency;
+        if (pLatencyMs != NULL) {
+            *pLatencyMs = latency;
+        }
         return output;
     }
 
@@ -472,7 +503,7 @@
                                         audio_channel_mask_t *pChannelMask)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
         uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
         audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
         audio_channel_mask_t channelMask = pChannelMask != NULL ?
@@ -487,13 +518,21 @@
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) *pDevices = devices;
+        if (pDevices != NULL) {
+            *pDevices = devices;
+        }
         samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = samplingRate;
+        }
         format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) *pFormat = format;
+        if (pFormat != NULL) {
+            *pFormat = format;
+        }
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) {
+            *pChannelMask = channelMask;
+        }
         return input;
     }
 
@@ -506,13 +545,12 @@
         return reply.readInt32();
     }
 
-    virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output)
+    virtual status_t invalidateStream(audio_stream_type_t stream)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t) output);
-        remote()->transact(SET_STREAM_OUTPUT, data, &reply);
+        remote()->transact(INVALIDATE_STREAM, data, &reply);
         return reply.readInt32();
     }
 
@@ -535,11 +573,11 @@
         status_t status = reply.readInt32();
         if (status == NO_ERROR) {
             uint32_t tmp = reply.readInt32();
-            if (halFrames) {
+            if (halFrames != NULL) {
                 *halFrames = tmp;
             }
             tmp = reply.readInt32();
-            if (dspFrames) {
+            if (dspFrames != NULL) {
                 *dspFrames = tmp;
             }
         }
@@ -551,8 +589,11 @@
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32((int32_t) ioHandle);
-        remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply);
-        return reply.readInt32();
+        status_t status = remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply);
+        if (status != NO_ERROR) {
+            return 0;
+        }
+        return (uint32_t) reply.readInt32();
     }
 
     virtual int newAudioSessionId()
@@ -560,26 +601,28 @@
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         status_t status = remote()->transact(NEW_AUDIO_SESSION_ID, data, &reply);
-        int id = 0;
+        int id = AUDIO_SESSION_ALLOCATE;
         if (status == NO_ERROR) {
             id = reply.readInt32();
         }
         return id;
     }
 
-    virtual void acquireAudioSessionId(int audioSession)
+    virtual void acquireAudioSessionId(int audioSession, int pid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(audioSession);
+        data.writeInt32(pid);
         remote()->transact(ACQUIRE_AUDIO_SESSION_ID, data, &reply);
     }
 
-    virtual void releaseAudioSessionId(int audioSession)
+    virtual void releaseAudioSessionId(int audioSession, int pid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(audioSession);
+        data.writeInt32(pid);
         remote()->transact(RELEASE_AUDIO_SESSION_ID, data, &reply);
     }
 
@@ -657,7 +700,7 @@
 
         if (pDesc == NULL) {
             return effect;
-            if (status) {
+            if (status != NULL) {
                 *status = BAD_VALUE;
             }
         }
@@ -675,7 +718,7 @@
         } else {
             lStatus = reply.readInt32();
             int tmp = reply.readInt32();
-            if (id) {
+            if (id != NULL) {
                 *id = tmp;
             }
             tmp = reply.readInt32();
@@ -685,7 +728,7 @@
             effect = interface_cast<IEffect>(reply.readStrongBinder());
             reply.read(pDesc, sizeof(effect_descriptor_t));
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
 
@@ -765,7 +808,6 @@
             pid_t tid = (pid_t) data.readInt32();
             int sessionId = data.readInt32();
             int clientUid = data.readInt32();
-            String8 name;
             status_t status;
             sp<IAudioTrack> track;
             if ((haveSharedBuffer && (buffer == 0)) ||
@@ -775,12 +817,13 @@
             } else {
                 track = createTrack(
                         (audio_stream_type_t) streamType, sampleRate, format,
-                        channelMask, frameCount, &flags, buffer, output, tid,
-                        &sessionId, name, clientUid, &status);
+                        channelMask, &frameCount, &flags, buffer, output, tid,
+                        &sessionId, clientUid, &status);
+                LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
             }
+            reply->writeInt64(frameCount);
             reply->writeInt32(flags);
             reply->writeInt32(sessionId);
-            reply->writeString8(name);
             reply->writeInt32(status);
             reply->writeStrongBinder(track->asBinder());
             return NO_ERROR;
@@ -797,8 +840,9 @@
             int sessionId = data.readInt32();
             status_t status;
             sp<IAudioRecord> record = openRecord(input,
-                    sampleRate, format, channelMask, frameCount, &flags, tid, &sessionId, &status);
+                    sampleRate, format, channelMask, &frameCount, &flags, tid, &sessionId, &status);
             LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
+            reply->writeInt64(frameCount);
             reply->writeInt32(flags);
             reply->writeInt32(sessionId);
             reply->writeInt32(status);
@@ -941,7 +985,7 @@
                                                  &latency,
                                                  flags,
                                                  hasOffloadInfo ? &offloadInfo : NULL);
-            ALOGV("OPEN_OUTPUT output, %p", output);
+            ALOGV("OPEN_OUTPUT output, %d", output);
             reply->writeInt32((int32_t) output);
             reply->writeInt32(devices);
             reply->writeInt32(samplingRate);
@@ -997,11 +1041,10 @@
             reply->writeInt32(closeInput((audio_io_handle_t) data.readInt32()));
             return NO_ERROR;
         } break;
-        case SET_STREAM_OUTPUT: {
+        case INVALIDATE_STREAM: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            uint32_t stream = data.readInt32();
-            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
-            reply->writeInt32(setStreamOutput((audio_stream_type_t) stream, output));
+            audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
+            reply->writeInt32(invalidateStream(stream));
             return NO_ERROR;
         } break;
         case SET_VOICE_VOLUME: {
@@ -1026,7 +1069,7 @@
         case GET_INPUT_FRAMES_LOST: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
-            reply->writeInt32(getInputFramesLost(ioHandle));
+            reply->writeInt32((int32_t) getInputFramesLost(ioHandle));
             return NO_ERROR;
         } break;
         case NEW_AUDIO_SESSION_ID: {
@@ -1037,13 +1080,15 @@
         case ACQUIRE_AUDIO_SESSION_ID: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int audioSession = data.readInt32();
-            acquireAudioSessionId(audioSession);
+            int pid = data.readInt32();
+            acquireAudioSessionId(audioSession, pid);
             return NO_ERROR;
         } break;
         case RELEASE_AUDIO_SESSION_ID: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int audioSession = data.readInt32();
-            releaseAudioSessionId(audioSession);
+            int pid = data.readInt32();
+            releaseAudioSessionId(audioSession, pid);
             return NO_ERROR;
         } break;
         case QUERY_NUM_EFFECTS: {
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 4be3c09..9bb4a49 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -137,6 +137,7 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(static_cast <uint32_t>(flags));
+        // hasOffloadInfo
         if (offloadInfo == NULL) {
             data.writeInt32(0);
         } else {
@@ -476,10 +477,11 @@
         case START_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            uint32_t stream = data.readInt32();
+            audio_stream_type_t stream =
+                                static_cast <audio_stream_type_t>(data.readInt32());
             int session = data.readInt32();
             reply->writeInt32(static_cast <uint32_t>(startOutput(output,
-                                                                 (audio_stream_type_t)stream,
+                                                                 stream,
                                                                  session)));
             return NO_ERROR;
         } break;
@@ -487,10 +489,11 @@
         case STOP_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            uint32_t stream = data.readInt32();
+            audio_stream_type_t stream =
+                                static_cast <audio_stream_type_t>(data.readInt32());
             int session = data.readInt32();
             reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
-                                                                (audio_stream_type_t)stream,
+                                                                stream,
                                                                 session)));
             return NO_ERROR;
         } break;
@@ -633,7 +636,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
             uint32_t inPastMs = (uint32_t)data.readInt32();
-            reply->writeInt32( isStreamActive((audio_stream_type_t) stream, inPastMs) );
+            reply->writeInt32( isStreamActive(stream, inPastMs) );
             return NO_ERROR;
         } break;
 
@@ -641,7 +644,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
             uint32_t inPastMs = (uint32_t)data.readInt32();
-            reply->writeInt32( isStreamActiveRemotely((audio_stream_type_t) stream, inPastMs) );
+            reply->writeInt32( isStreamActiveRemotely(stream, inPastMs) );
             return NO_ERROR;
         } break;
 
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 4a7de65..9866d70 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -50,6 +50,9 @@
         status_t status = remote()->transact(GET_CBLK, data, &reply);
         if (status == NO_ERROR) {
             cblk = interface_cast<IMemory>(reply.readStrongBinder());
+            if (cblk != 0 && cblk->pointer() == NULL) {
+                cblk.clear();
+            }
         }
         return cblk;
     }
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index e9df704..265bb1b 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -60,6 +60,9 @@
         status_t status = remote()->transact(GET_CBLK, data, &reply);
         if (status == NO_ERROR) {
             cblk = interface_cast<IMemory>(reply.readStrongBinder());
+            if (cblk != 0 && cblk->pointer() == NULL) {
+                cblk.clear();
+            }
         }
         return cblk;
     }
@@ -122,6 +125,9 @@
             status = reply.readInt32();
             if (status == NO_ERROR) {
                 *buffer = interface_cast<IMemory>(reply.readStrongBinder());
+                if (*buffer != 0 && (*buffer)->pointer() == NULL) {
+                    (*buffer).clear();
+                }
             }
         }
         return status;
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index a303a8f..b94012a 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -117,6 +117,9 @@
         status_t status = remote()->transact(GET_CBLK, data, &reply);
         if (status == NO_ERROR) {
             cblk = interface_cast<IMemory>(reply.readStrongBinder());
+            if (cblk != 0 && cblk->pointer() == NULL) {
+                cblk.clear();
+            }
         }
         return cblk;
     }
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index 9db5b1b..10b4934 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -75,7 +75,7 @@
 }
 
 void
-IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& who) {
+IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
     ALOGW("media server died");
 
     // Need to do this with the lock held
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
new file mode 100644
index 0000000..7e26ee6
--- /dev/null
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IMediaHTTPConnection"
+#include <utils/Log.h>
+
+#include <media/IMediaHTTPConnection.h>
+
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <utils/String8.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+enum {
+    CONNECT = IBinder::FIRST_CALL_TRANSACTION,
+    DISCONNECT,
+    READ_AT,
+    GET_SIZE,
+    GET_MIME_TYPE,
+    GET_URI
+};
+
+struct BpMediaHTTPConnection : public BpInterface<IMediaHTTPConnection> {
+    BpMediaHTTPConnection(const sp<IBinder> &impl)
+        : BpInterface<IMediaHTTPConnection>(impl) {
+    }
+
+    virtual bool connect(
+            const char *uri, const KeyedVector<String8, String8> *headers) {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPConnection::getInterfaceDescriptor());
+
+        String16 tmp(uri);
+        data.writeString16(tmp);
+
+        tmp = String16("");
+        if (headers != NULL) {
+            for (size_t i = 0; i < headers->size(); ++i) {
+                String16 key(headers->keyAt(i).string());
+                String16 val(headers->valueAt(i).string());
+
+                tmp.append(key);
+                tmp.append(String16(": "));
+                tmp.append(val);
+                tmp.append(String16("\r\n"));
+            }
+        }
+        data.writeString16(tmp);
+
+        remote()->transact(CONNECT, data, &reply);
+
+        int32_t exceptionCode = reply.readExceptionCode();
+
+        if (exceptionCode) {
+            return false;
+        }
+
+        sp<IBinder> binder = reply.readStrongBinder();
+        mMemory = interface_cast<IMemory>(binder);
+
+        return mMemory != NULL;
+    }
+
+    virtual void disconnect() {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPConnection::getInterfaceDescriptor());
+
+        remote()->transact(DISCONNECT, data, &reply);
+    }
+
+    virtual ssize_t readAt(off64_t offset, void *buffer, size_t size) {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPConnection::getInterfaceDescriptor());
+
+        data.writeInt64(offset);
+        data.writeInt32(size);
+
+        status_t err = remote()->transact(READ_AT, data, &reply);
+        if (err != OK) {
+            ALOGE("remote readAt failed");
+            return UNKNOWN_ERROR;
+        }
+
+        int32_t exceptionCode = reply.readExceptionCode();
+
+        if (exceptionCode) {
+            return UNKNOWN_ERROR;
+        }
+
+        int32_t len = reply.readInt32();
+
+        if (len < 0 || (size_t)len > size || mMemory == NULL) {
+            return UNKNOWN_ERROR;
+        }
+        memcpy(buffer, mMemory->pointer(), len);
+        return len;
+    }
+
+    virtual off64_t getSize() {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPConnection::getInterfaceDescriptor());
+
+        remote()->transact(GET_SIZE, data, &reply);
+
+        int32_t exceptionCode = reply.readExceptionCode();
+
+        if (exceptionCode) {
+            return UNKNOWN_ERROR;
+        }
+
+        return reply.readInt64();
+    }
+
+    virtual status_t getMIMEType(String8 *mimeType) {
+        *mimeType = String8("");
+
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPConnection::getInterfaceDescriptor());
+
+        remote()->transact(GET_MIME_TYPE, data, &reply);
+
+        int32_t exceptionCode = reply.readExceptionCode();
+
+        if (exceptionCode) {
+            return UNKNOWN_ERROR;
+        }
+
+        *mimeType = String8(reply.readString16());
+
+        return OK;
+    }
+
+    virtual status_t getUri(String8 *uri) {
+        *uri = String8("");
+
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPConnection::getInterfaceDescriptor());
+
+        remote()->transact(GET_URI, data, &reply);
+
+        int32_t exceptionCode = reply.readExceptionCode();
+
+        if (exceptionCode) {
+            return UNKNOWN_ERROR;
+        }
+
+        *uri = String8(reply.readString16());
+
+        return OK;
+    }
+
+private:
+    sp<IMemory> mMemory;
+};
+
+IMPLEMENT_META_INTERFACE(
+        MediaHTTPConnection, "android.media.IMediaHTTPConnection");
+
+}  // namespace android
+
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
new file mode 100644
index 0000000..1260582
--- /dev/null
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IMediaHTTPService"
+#include <utils/Log.h>
+
+#include <media/IMediaHTTPService.h>
+
+#include <binder/Parcel.h>
+#include <media/IMediaHTTPConnection.h>
+
+namespace android {
+
+enum {
+    MAKE_HTTP = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+struct BpMediaHTTPService : public BpInterface<IMediaHTTPService> {
+    BpMediaHTTPService(const sp<IBinder> &impl)
+        : BpInterface<IMediaHTTPService>(impl) {
+    }
+
+    virtual sp<IMediaHTTPConnection> makeHTTPConnection() {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                IMediaHTTPService::getInterfaceDescriptor());
+
+        remote()->transact(MAKE_HTTP, data, &reply);
+
+        status_t err = reply.readInt32();
+
+        if (err != OK) {
+            return NULL;
+        }
+
+        return interface_cast<IMediaHTTPConnection>(reply.readStrongBinder());
+    }
+};
+
+IMPLEMENT_META_INTERFACE(
+        MediaHTTPService, "android.media.IMediaHTTPService");
+
+}  // namespace android
+
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index a91ad49..432d890 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -18,6 +18,7 @@
 #include <stdint.h>
 #include <sys/types.h>
 #include <binder/Parcel.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaMetadataRetriever.h>
 #include <utils/String8.h>
 #include <utils/KeyedVector.h>
@@ -84,10 +85,16 @@
     }
 
     status_t setDataSource(
-            const char *srcUrl, const KeyedVector<String8, String8> *headers)
+            const sp<IMediaHTTPService> &httpService,
+            const char *srcUrl,
+            const KeyedVector<String8, String8> *headers)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+        data.writeInt32(httpService != NULL);
+        if (httpService != NULL) {
+            data.writeStrongBinder(httpService->asBinder());
+        }
         data.writeCString(srcUrl);
 
         if (headers == NULL) {
@@ -195,6 +202,13 @@
         } break;
         case SET_DATA_SOURCE_URL: {
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+
+            sp<IMediaHTTPService> httpService;
+            if (data.readInt32()) {
+                httpService =
+                    interface_cast<IMediaHTTPService>(data.readStrongBinder());
+            }
+
             const char* srcUrl = data.readCString();
 
             KeyedVector<String8, String8> headers;
@@ -206,7 +220,8 @@
             }
 
             reply->writeInt32(
-                    setDataSource(srcUrl, numHeaders > 0 ? &headers : NULL));
+                    setDataSource(
+                        httpService, srcUrl, numHeaders > 0 ? &headers : NULL));
 
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index e79bcd2..d778d05 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -21,6 +21,7 @@
 
 #include <binder/Parcel.h>
 
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayer.h>
 #include <media/IStreamSource.h>
 
@@ -75,11 +76,17 @@
         remote()->transact(DISCONNECT, data, &reply);
     }
 
-    status_t setDataSource(const char* url,
+    status_t setDataSource(
+            const sp<IMediaHTTPService> &httpService,
+            const char* url,
             const KeyedVector<String8, String8>* headers)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        data.writeInt32(httpService != NULL);
+        if (httpService != NULL) {
+            data.writeStrongBinder(httpService->asBinder());
+        }
         data.writeCString(url);
         if (headers == NULL) {
             data.writeInt32(0);
@@ -355,6 +362,13 @@
         } break;
         case SET_DATA_SOURCE_URL: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+            sp<IMediaHTTPService> httpService;
+            if (data.readInt32()) {
+                httpService =
+                    interface_cast<IMediaHTTPService>(data.readStrongBinder());
+            }
+
             const char* url = data.readCString();
             KeyedVector<String8, String8> headers;
             int32_t numHeaders = data.readInt32();
@@ -363,7 +377,8 @@
                 String8 value = data.readString8();
                 headers.add(key, value);
             }
-            reply->writeInt32(setDataSource(url, numHeaders > 0 ? &headers : NULL));
+            reply->writeInt32(setDataSource(
+                        httpService, url, numHeaders > 0 ? &headers : NULL));
             return NO_ERROR;
         } break;
         case SET_DATA_SOURCE_FD: {
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index 3c22b4c..d116b14 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -23,6 +23,7 @@
 #include <media/ICrypto.h>
 #include <media/IDrm.h>
 #include <media/IHDCP.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <media/IMediaRecorder.h>
 #include <media/IOMX.h>
@@ -48,7 +49,6 @@
     ADD_BATTERY_DATA,
     PULL_BATTERY_DATA,
     LISTEN_FOR_REMOTE_DISPLAY,
-    UPDATE_PROXY_CONFIG,
 };
 
 class BpMediaPlayerService: public BpInterface<IMediaPlayerService>
@@ -86,12 +86,21 @@
         return interface_cast<IMediaRecorder>(reply.readStrongBinder());
     }
 
-    virtual status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
-                               audio_format_t* pFormat,
-                               const sp<IMemoryHeap>& heap, size_t *pSize)
+    virtual status_t decode(
+            const sp<IMediaHTTPService> &httpService,
+            const char* url,
+            uint32_t *pSampleRate,
+            int* pNumChannels,
+            audio_format_t* pFormat,
+            const sp<IMemoryHeap>& heap,
+            size_t *pSize)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+        data.writeInt32(httpService != NULL);
+        if (httpService != NULL) {
+            data.writeStrongBinder(httpService->asBinder());
+        }
         data.writeCString(url);
         data.writeStrongBinder(heap->asBinder());
         status_t status = remote()->transact(DECODE_URL, data, &reply);
@@ -182,25 +191,6 @@
         remote()->transact(LISTEN_FOR_REMOTE_DISPLAY, data, &reply);
         return interface_cast<IRemoteDisplay>(reply.readStrongBinder());
     }
-
-    virtual status_t updateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList) {
-        Parcel data, reply;
-
-        data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
-        if (host == NULL) {
-            data.writeInt32(0);
-        } else {
-            data.writeInt32(1);
-            data.writeCString(host);
-            data.writeInt32(port);
-            data.writeCString(exclusionList);
-        }
-
-        remote()->transact(UPDATE_PROXY_CONFIG, data, &reply);
-
-        return reply.readInt32();
-    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaPlayerService, "android.media.IMediaPlayerService");
@@ -222,13 +212,25 @@
         } break;
         case DECODE_URL: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
+            sp<IMediaHTTPService> httpService;
+            if (data.readInt32()) {
+                httpService =
+                    interface_cast<IMediaHTTPService>(data.readStrongBinder());
+            }
             const char* url = data.readCString();
             sp<IMemoryHeap> heap = interface_cast<IMemoryHeap>(data.readStrongBinder());
             uint32_t sampleRate;
             int numChannels;
             audio_format_t format;
             size_t size;
-            status_t status = decode(url, &sampleRate, &numChannels, &format, heap, &size);
+            status_t status =
+                decode(httpService,
+                       url,
+                       &sampleRate,
+                       &numChannels,
+                       &format,
+                       heap,
+                       &size);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->writeInt32(sampleRate);
@@ -316,24 +318,6 @@
             reply->writeStrongBinder(display->asBinder());
             return NO_ERROR;
         } break;
-        case UPDATE_PROXY_CONFIG:
-        {
-            CHECK_INTERFACE(IMediaPlayerService, data, reply);
-
-            const char *host = NULL;
-            int32_t port = 0;
-            const char *exclusionList = NULL;
-
-            if (data.readInt32()) {
-                host = data.readCString();
-                port = data.readInt32();
-                exclusionList = data.readCString();
-            }
-
-            reply->writeInt32(updateProxyConfig(host, port, exclusionList));
-
-            return OK;
-        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index e914b34..f0f1832 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -90,7 +90,7 @@
             pLibConfig->sampleRate,
             AUDIO_FORMAT_PCM_16_BIT,
             audio_channel_out_mask_from_count(pLibConfig->numChannels),
-            mTrackBufferSize,
+            (size_t) mTrackBufferSize,
             AUDIO_OUTPUT_FLAG_NONE);
 
     // create render and playback thread
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 8319cd7..1074da9 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -81,8 +81,14 @@
     {"timelapseqvga", CAMCORDER_QUALITY_TIME_LAPSE_QVGA},
 };
 
+#if LOG_NDEBUG
+#define UNUSED __unused
+#else
+#define UNUSED
+#endif
+
 /*static*/ void
-MediaProfiles::logVideoCodec(const MediaProfiles::VideoCodec& codec)
+MediaProfiles::logVideoCodec(const MediaProfiles::VideoCodec& codec UNUSED)
 {
     ALOGV("video codec:");
     ALOGV("codec = %d", codec.mCodec);
@@ -93,7 +99,7 @@
 }
 
 /*static*/ void
-MediaProfiles::logAudioCodec(const MediaProfiles::AudioCodec& codec)
+MediaProfiles::logAudioCodec(const MediaProfiles::AudioCodec& codec UNUSED)
 {
     ALOGV("audio codec:");
     ALOGV("codec = %d", codec.mCodec);
@@ -103,7 +109,7 @@
 }
 
 /*static*/ void
-MediaProfiles::logVideoEncoderCap(const MediaProfiles::VideoEncoderCap& cap)
+MediaProfiles::logVideoEncoderCap(const MediaProfiles::VideoEncoderCap& cap UNUSED)
 {
     ALOGV("video encoder cap:");
     ALOGV("codec = %d", cap.mCodec);
@@ -114,7 +120,7 @@
 }
 
 /*static*/ void
-MediaProfiles::logAudioEncoderCap(const MediaProfiles::AudioEncoderCap& cap)
+MediaProfiles::logAudioEncoderCap(const MediaProfiles::AudioEncoderCap& cap UNUSED)
 {
     ALOGV("audio encoder cap:");
     ALOGV("codec = %d", cap.mCodec);
@@ -124,21 +130,21 @@
 }
 
 /*static*/ void
-MediaProfiles::logVideoDecoderCap(const MediaProfiles::VideoDecoderCap& cap)
+MediaProfiles::logVideoDecoderCap(const MediaProfiles::VideoDecoderCap& cap UNUSED)
 {
     ALOGV("video decoder cap:");
     ALOGV("codec = %d", cap.mCodec);
 }
 
 /*static*/ void
-MediaProfiles::logAudioDecoderCap(const MediaProfiles::AudioDecoderCap& cap)
+MediaProfiles::logAudioDecoderCap(const MediaProfiles::AudioDecoderCap& cap UNUSED)
 {
     ALOGV("audio codec cap:");
     ALOGV("codec = %d", cap.mCodec);
 }
 
 /*static*/ void
-MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap)
+MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap UNUSED)
 {
     ALOGV("videoeditor cap:");
     ALOGV("mMaxInputFrameWidth = %d", cap.mMaxInputFrameWidth);
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index 93a4a4c..1661f04 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -14,217 +14,57 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaScannerClient"
+#include <utils/Log.h>
+
 #include <media/mediascanner.h>
 
+#include "CharacterEncodingDetector.h"
 #include "StringArray.h"
 
-#include "autodetect.h"
-#include "unicode/ucnv.h"
-#include "unicode/ustring.h"
-
 namespace android {
 
 MediaScannerClient::MediaScannerClient()
-    :   mNames(NULL),
-        mValues(NULL),
-        mLocaleEncoding(kEncodingNone)
+    :   mEncodingDetector(NULL)
 {
 }
 
 MediaScannerClient::~MediaScannerClient()
 {
-    delete mNames;
-    delete mValues;
+    delete mEncodingDetector;
 }
 
 void MediaScannerClient::setLocale(const char* locale)
 {
-    if (!locale) return;
-
-    if (!strncmp(locale, "ja", 2))
-        mLocaleEncoding = kEncodingShiftJIS;
-    else if (!strncmp(locale, "ko", 2))
-        mLocaleEncoding = kEncodingEUCKR;
-    else if (!strncmp(locale, "zh", 2)) {
-        if (!strcmp(locale, "zh_CN")) {
-            // simplified chinese for mainland China
-            mLocaleEncoding = kEncodingGBK;
-        } else {
-            // assume traditional for non-mainland Chinese locales (Taiwan, Hong Kong, Singapore)
-            mLocaleEncoding = kEncodingBig5;
-        }
-    }
+    mLocale = locale; // not currently used
 }
 
 void MediaScannerClient::beginFile()
 {
-    mNames = new StringArray;
-    mValues = new StringArray;
+    delete mEncodingDetector;
+    mEncodingDetector = new CharacterEncodingDetector();
 }
 
 status_t MediaScannerClient::addStringTag(const char* name, const char* value)
 {
-    if (mLocaleEncoding != kEncodingNone) {
-        // don't bother caching strings that are all ASCII.
-        // call handleStringTag directly instead.
-        // check to see if value (which should be utf8) has any non-ASCII characters
-        bool nonAscii = false;
-        const char* chp = value;
-        char ch;
-        while ((ch = *chp++)) {
-            if (ch & 0x80) {
-                nonAscii = true;
-                break;
-            }
-        }
-
-        if (nonAscii) {
-            // save the strings for later so they can be used for native encoding detection
-            mNames->push_back(name);
-            mValues->push_back(value);
-            return OK;
-        }
-        // else fall through
-    }
-
-    // autodetection is not necessary, so no need to cache the values
-    // pass directly to the client instead
-    return handleStringTag(name, value);
-}
-
-static uint32_t possibleEncodings(const char* s)
-{
-    uint32_t result = kEncodingAll;
-    // if s contains a native encoding, then it was mistakenly encoded in utf8 as if it were latin-1
-    // so we need to reverse the latin-1 -> utf8 conversion to get the native chars back
-    uint8_t ch1, ch2;
-    uint8_t* chp = (uint8_t *)s;
-
-    while ((ch1 = *chp++)) {
-        if (ch1 & 0x80) {
-            ch2 = *chp++;
-            ch1 = ((ch1 << 6) & 0xC0) | (ch2 & 0x3F);
-            // ch1 is now the first byte of the potential native char
-
-            ch2 = *chp++;
-            if (ch2 & 0x80)
-                ch2 = ((ch2 << 6) & 0xC0) | (*chp++ & 0x3F);
-            // ch2 is now the second byte of the potential native char
-            int ch = (int)ch1 << 8 | (int)ch2;
-            result &= findPossibleEncodings(ch);
-        }
-        // else ASCII character, which could be anything
-    }
-
-    return result;
-}
-
-void MediaScannerClient::convertValues(uint32_t encoding)
-{
-    const char* enc = NULL;
-    switch (encoding) {
-        case kEncodingShiftJIS:
-            enc = "shift-jis";
-            break;
-        case kEncodingGBK:
-            enc = "gbk";
-            break;
-        case kEncodingBig5:
-            enc = "Big5";
-            break;
-        case kEncodingEUCKR:
-            enc = "EUC-KR";
-            break;
-    }
-
-    if (enc) {
-        UErrorCode status = U_ZERO_ERROR;
-
-        UConverter *conv = ucnv_open(enc, &status);
-        if (U_FAILURE(status)) {
-            ALOGE("could not create UConverter for %s", enc);
-            return;
-        }
-        UConverter *utf8Conv = ucnv_open("UTF-8", &status);
-        if (U_FAILURE(status)) {
-            ALOGE("could not create UConverter for UTF-8");
-            ucnv_close(conv);
-            return;
-        }
-
-        // for each value string, convert from native encoding to UTF-8
-        for (int i = 0; i < mNames->size(); i++) {
-            // first we need to untangle the utf8 and convert it back to the original bytes
-            // since we are reducing the length of the string, we can do this in place
-            uint8_t* src = (uint8_t *)mValues->getEntry(i);
-            int len = strlen((char *)src);
-            uint8_t* dest = src;
-
-            uint8_t uch;
-            while ((uch = *src++)) {
-                if (uch & 0x80)
-                    *dest++ = ((uch << 6) & 0xC0) | (*src++ & 0x3F);
-                else
-                    *dest++ = uch;
-            }
-            *dest = 0;
-
-            // now convert from native encoding to UTF-8
-            const char* source = mValues->getEntry(i);
-            int targetLength = len * 3 + 1;
-            char* buffer = new char[targetLength];
-            // don't normally check for NULL, but in this case targetLength may be large
-            if (!buffer)
-                break;
-            char* target = buffer;
-
-            ucnv_convertEx(utf8Conv, conv, &target, target + targetLength,
-                    &source, (const char *)dest, NULL, NULL, NULL, NULL, TRUE, TRUE, &status);
-            if (U_FAILURE(status)) {
-                ALOGE("ucnv_convertEx failed: %d", status);
-                mValues->setEntry(i, "???");
-            } else {
-                // zero terminate
-                *target = 0;
-                mValues->setEntry(i, buffer);
-            }
-
-            delete[] buffer;
-        }
-
-        ucnv_close(conv);
-        ucnv_close(utf8Conv);
-    }
+    mEncodingDetector->addTag(name, value);
+    return OK;
 }
 
 void MediaScannerClient::endFile()
 {
-    if (mLocaleEncoding != kEncodingNone) {
-        int size = mNames->size();
-        uint32_t encoding = kEncodingAll;
+    mEncodingDetector->detectAndConvert();
 
-        // compute a bit mask containing all possible encodings
-        for (int i = 0; i < mNames->size(); i++)
-            encoding &= possibleEncodings(mValues->getEntry(i));
-
-        // if the locale encoding matches, then assume we have a native encoding.
-        if (encoding & mLocaleEncoding)
-            convertValues(mLocaleEncoding);
-
-        // finally, push all name/value pairs to the client
-        for (int i = 0; i < mNames->size(); i++) {
-            status_t status = handleStringTag(mNames->getEntry(i), mValues->getEntry(i));
-            if (status) {
-                break;
-            }
+    int size = mEncodingDetector->size();
+    if (size) {
+        for (int i = 0; i < size; i++) {
+            const char *name;
+            const char *value;
+            mEncodingDetector->getTag(i, &name, &value);
+            handleStringTag(name, value);
         }
     }
-    // else addStringTag() has done all the work so we have nothing to do
-
-    delete mNames;
-    delete mValues;
-    mNames = NULL;
-    mValues = NULL;
 }
 
 }  // namespace android
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 22e9fad..a55e09c 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -21,6 +21,7 @@
 #define USE_SHARED_MEM_BUFFER
 
 #include <media/AudioTrack.h>
+#include <media/IMediaHTTPService.h>
 #include <media/mediaplayer.h>
 #include <media/SoundPool.h>
 #include "SoundPoolThread.h"
@@ -199,7 +200,7 @@
     return NULL;
 }
 
-int SoundPool::load(const char* path, int priority)
+int SoundPool::load(const char* path, int priority __unused)
 {
     ALOGV("load: path=%s, priority=%d", path, priority);
     Mutex::Autolock lock(&mLock);
@@ -209,7 +210,7 @@
     return sample->sampleID();
 }
 
-int SoundPool::load(int fd, int64_t offset, int64_t length, int priority)
+int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused)
 {
     ALOGV("load: fd=%d, offset=%lld, length=%lld, priority=%d",
             fd, offset, length, priority);
@@ -496,7 +497,14 @@
 
     ALOGV("Start decode");
     if (mUrl) {
-        status = MediaPlayer::decode(mUrl, &sampleRate, &numChannels, &format, mHeap, &mSize);
+        status = MediaPlayer::decode(
+                NULL /* httpService */,
+                mUrl,
+                &sampleRate,
+                &numChannels,
+                &format,
+                mHeap,
+                &mSize);
     } else {
         status = MediaPlayer::decode(mFd, mOffset, mLength, &sampleRate, &numChannels, &format,
                                      mHeap, &mSize);
@@ -579,7 +587,7 @@
         uint32_t sampleRate = uint32_t(float(sample->sampleRate()) * rate + 0.5);
         uint32_t totalFrames = (kDefaultBufferCount * afFrameCount * sampleRate) / afSampleRate;
         uint32_t bufferFrames = (totalFrames + (kDefaultBufferCount - 1)) / kDefaultBufferCount;
-        uint32_t frameCount = 0;
+        size_t frameCount = 0;
 
         if (loop) {
             frameCount = sample->size()/numChannels/
@@ -600,16 +608,15 @@
         // wrong audio audio buffer size  (mAudioBufferSize)
         unsigned long toggle = mToggle ^ 1;
         void *userData = (void *)((unsigned long)this | toggle);
-        uint32_t channels = (numChannels == 2) ?
-                AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO;
+        audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(numChannels);
 
         // do not create a new audio track if current track is compatible with sample parameters
 #ifdef USE_SHARED_MEM_BUFFER
         newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
-                channels, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData);
+                channelMask, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData);
 #else
         newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
-                channels, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
+                channelMask, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
                 bufferFrames);
 #endif
         oldTrack = mAudioTrack;
@@ -730,7 +737,8 @@
                     count = b->size;
                 }
                 memcpy(q, p, count);
-//              ALOGV("fill: q=%p, p=%p, mPos=%u, b->size=%u, count=%d", q, p, mPos, b->size, count);
+//              ALOGV("fill: q=%p, p=%p, mPos=%u, b->size=%u, count=%d", q, p, mPos, b->size,
+//                      count);
             } else if (mPos < mAudioBufferSize) {
                 count = mAudioBufferSize - mPos;
                 if (count > b->size) {
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index adef3be..61b6d36 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1057,7 +1057,7 @@
                       0,    // notificationFrames
                       0,    // sharedBuffer
                       mThreadCanCallJava,
-                      0,    // sessionId
+                      AUDIO_SESSION_ALLOCATE,
                       AudioTrack::TRANSFER_CALLBACK);
 
     if (mpAudioTrack->initCheck() != NO_ERROR) {
diff --git a/media/libmedia/autodetect.cpp b/media/libmedia/autodetect.cpp
deleted file mode 100644
index be5c3b2..0000000
--- a/media/libmedia/autodetect.cpp
+++ /dev/null
@@ -1,885 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "autodetect.h"
-
-struct CharRange {
-    uint16_t first;
-    uint16_t last;
-};
-
-#define ARRAY_SIZE(x)   (sizeof(x) / sizeof(*x))
-
-// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
-static const CharRange kShiftJISRanges[] = {
-    { 0x8140, 0x817E },
-    { 0x8180, 0x81AC },
-    { 0x81B8, 0x81BF },
-    { 0x81C8, 0x81CE },
-    { 0x81DA, 0x81E8 },
-    { 0x81F0, 0x81F7 },
-    { 0x81FC, 0x81FC },
-    { 0x824F, 0x8258 },
-    { 0x8260, 0x8279 },
-    { 0x8281, 0x829A },
-    { 0x829F, 0x82F1 },
-    { 0x8340, 0x837E },
-    { 0x8380, 0x8396 },
-    { 0x839F, 0x83B6 },
-    { 0x83BF, 0x83D6 },
-    { 0x8440, 0x8460 },
-    { 0x8470, 0x847E },
-    { 0x8480, 0x8491 },
-    { 0x849F, 0x84BE },
-    { 0x8740, 0x875D },
-    { 0x875F, 0x8775 },
-    { 0x877E, 0x877E },
-    { 0x8780, 0x879C },
-    { 0x889F, 0x88FC },
-    { 0x8940, 0x897E },
-    { 0x8980, 0x89FC },
-    { 0x8A40, 0x8A7E },
-    { 0x8A80, 0x8AFC },
-    { 0x8B40, 0x8B7E },
-    { 0x8B80, 0x8BFC },
-    { 0x8C40, 0x8C7E },
-    { 0x8C80, 0x8CFC },
-    { 0x8D40, 0x8D7E },
-    { 0x8D80, 0x8DFC },
-    { 0x8E40, 0x8E7E },
-    { 0x8E80, 0x8EFC },
-    { 0x8F40, 0x8F7E },
-    { 0x8F80, 0x8FFC },
-    { 0x9040, 0x907E },
-    { 0x9080, 0x90FC },
-    { 0x9140, 0x917E },
-    { 0x9180, 0x91FC },
-    { 0x9240, 0x927E },
-    { 0x9280, 0x92FC },
-    { 0x9340, 0x937E },
-    { 0x9380, 0x93FC },
-    { 0x9440, 0x947E },
-    { 0x9480, 0x94FC },
-    { 0x9540, 0x957E },
-    { 0x9580, 0x95FC },
-    { 0x9640, 0x967E },
-    { 0x9680, 0x96FC },
-    { 0x9740, 0x977E },
-    { 0x9780, 0x97FC },
-    { 0x9840, 0x9872 },
-    { 0x989F, 0x98FC },
-    { 0x9940, 0x997E },
-    { 0x9980, 0x99FC },
-    { 0x9A40, 0x9A7E },
-    { 0x9A80, 0x9AFC },
-    { 0x9B40, 0x9B7E },
-    { 0x9B80, 0x9BFC },
-    { 0x9C40, 0x9C7E },
-    { 0x9C80, 0x9CFC },
-    { 0x9D40, 0x9D7E },
-    { 0x9D80, 0x9DFC },
-    { 0x9E40, 0x9E7E },
-    { 0x9E80, 0x9EFC },
-    { 0x9F40, 0x9F7E },
-    { 0x9F80, 0x9FFC },
-    { 0xE040, 0xE07E },
-    { 0xE080, 0xE0FC },
-    { 0xE140, 0xE17E },
-    { 0xE180, 0xE1FC },
-    { 0xE240, 0xE27E },
-    { 0xE280, 0xE2FC },
-    { 0xE340, 0xE37E },
-    { 0xE380, 0xE3FC },
-    { 0xE440, 0xE47E },
-    { 0xE480, 0xE4FC },
-    { 0xE540, 0xE57E },
-    { 0xE580, 0xE5FC },
-    { 0xE640, 0xE67E },
-    { 0xE680, 0xE6FC },
-    { 0xE740, 0xE77E },
-    { 0xE780, 0xE7FC },
-    { 0xE840, 0xE87E },
-    { 0xE880, 0xE8FC },
-    { 0xE940, 0xE97E },
-    { 0xE980, 0xE9FC },
-    { 0xEA40, 0xEA7E },
-    { 0xEA80, 0xEAA4 },
-    { 0xED40, 0xED7E },
-    { 0xED80, 0xEDFC },
-    { 0xEE40, 0xEE7E },
-    { 0xEE80, 0xEEEC },
-    { 0xEEEF, 0xEEFC },
-    { 0xFA40, 0xFA7E },
-    { 0xFA80, 0xFAFC },
-    { 0xFB40, 0xFB7E },
-    { 0xFB80, 0xFBFC },
-    { 0xFC40, 0xFC4B },
-};
-
-// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP936.TXT
-static const CharRange kGBKRanges[] = {
-    { 0x8140, 0x817E },
-    { 0x8180, 0x81FE },
-    { 0x8240, 0x827E },
-    { 0x8280, 0x82FE },
-    { 0x8340, 0x837E },
-    { 0x8380, 0x83FE },
-    { 0x8440, 0x847E },
-    { 0x8480, 0x84FE },
-    { 0x8540, 0x857E },
-    { 0x8580, 0x85FE },
-    { 0x8640, 0x867E },
-    { 0x8680, 0x86FE },
-    { 0x8740, 0x877E },
-    { 0x8780, 0x87FE },
-    { 0x8840, 0x887E },
-    { 0x8880, 0x88FE },
-    { 0x8940, 0x897E },
-    { 0x8980, 0x89FE },
-    { 0x8A40, 0x8A7E },
-    { 0x8A80, 0x8AFE },
-    { 0x8B40, 0x8B7E },
-    { 0x8B80, 0x8BFE },
-    { 0x8C40, 0x8C7E },
-    { 0x8C80, 0x8CFE },
-    { 0x8D40, 0x8D7E },
-    { 0x8D80, 0x8DFE },
-    { 0x8E40, 0x8E7E },
-    { 0x8E80, 0x8EFE },
-    { 0x8F40, 0x8F7E },
-    { 0x8F80, 0x8FFE },
-    { 0x9040, 0x907E },
-    { 0x9080, 0x90FE },
-    { 0x9140, 0x917E },
-    { 0x9180, 0x91FE },
-    { 0x9240, 0x927E },
-    { 0x9280, 0x92FE },
-    { 0x9340, 0x937E },
-    { 0x9380, 0x93FE },
-    { 0x9440, 0x947E },
-    { 0x9480, 0x94FE },
-    { 0x9540, 0x957E },
-    { 0x9580, 0x95FE },
-    { 0x9640, 0x967E },
-    { 0x9680, 0x96FE },
-    { 0x9740, 0x977E },
-    { 0x9780, 0x97FE },
-    { 0x9840, 0x987E },
-    { 0x9880, 0x98FE },
-    { 0x9940, 0x997E },
-    { 0x9980, 0x99FE },
-    { 0x9A40, 0x9A7E },
-    { 0x9A80, 0x9AFE },
-    { 0x9B40, 0x9B7E },
-    { 0x9B80, 0x9BFE },
-    { 0x9C40, 0x9C7E },
-    { 0x9C80, 0x9CFE },
-    { 0x9D40, 0x9D7E },
-    { 0x9D80, 0x9DFE },
-    { 0x9E40, 0x9E7E },
-    { 0x9E80, 0x9EFE },
-    { 0x9F40, 0x9F7E },
-    { 0x9F80, 0x9FFE },
-    { 0xA040, 0xA07E },
-    { 0xA080, 0xA0FE },
-    { 0xA1A1, 0xA1FE },
-    { 0xA2A1, 0xA2AA },
-    { 0xA2B1, 0xA2E2 },
-    { 0xA2E5, 0xA2EE },
-    { 0xA2F1, 0xA2FC },
-    { 0xA3A1, 0xA3FE },
-    { 0xA4A1, 0xA4F3 },
-    { 0xA5A1, 0xA5F6 },
-    { 0xA6A1, 0xA6B8 },
-    { 0xA6C1, 0xA6D8 },
-    { 0xA6E0, 0xA6EB },
-    { 0xA6EE, 0xA6F2 },
-    { 0xA6F4, 0xA6F5 },
-    { 0xA7A1, 0xA7C1 },
-    { 0xA7D1, 0xA7F1 },
-    { 0xA840, 0xA87E },
-    { 0xA880, 0xA895 },
-    { 0xA8A1, 0xA8BB },
-    { 0xA8BD, 0xA8BE },
-    { 0xA8C0, 0xA8C0 },
-    { 0xA8C5, 0xA8E9 },
-    { 0xA940, 0xA957 },
-    { 0xA959, 0xA95A },
-    { 0xA95C, 0xA95C },
-    { 0xA960, 0xA97E },
-    { 0xA980, 0xA988 },
-    { 0xA996, 0xA996 },
-    { 0xA9A4, 0xA9EF },
-    { 0xAA40, 0xAA7E },
-    { 0xAA80, 0xAAA0 },
-    { 0xAB40, 0xAB7E },
-    { 0xAB80, 0xABA0 },
-    { 0xAC40, 0xAC7E },
-    { 0xAC80, 0xACA0 },
-    { 0xAD40, 0xAD7E },
-    { 0xAD80, 0xADA0 },
-    { 0xAE40, 0xAE7E },
-    { 0xAE80, 0xAEA0 },
-    { 0xAF40, 0xAF7E },
-    { 0xAF80, 0xAFA0 },
-    { 0xB040, 0xB07E },
-    { 0xB080, 0xB0FE },
-    { 0xB140, 0xB17E },
-    { 0xB180, 0xB1FE },
-    { 0xB240, 0xB27E },
-    { 0xB280, 0xB2FE },
-    { 0xB340, 0xB37E },
-    { 0xB380, 0xB3FE },
-    { 0xB440, 0xB47E },
-    { 0xB480, 0xB4FE },
-    { 0xB540, 0xB57E },
-    { 0xB580, 0xB5FE },
-    { 0xB640, 0xB67E },
-    { 0xB680, 0xB6FE },
-    { 0xB740, 0xB77E },
-    { 0xB780, 0xB7FE },
-    { 0xB840, 0xB87E },
-    { 0xB880, 0xB8FE },
-    { 0xB940, 0xB97E },
-    { 0xB980, 0xB9FE },
-    { 0xBA40, 0xBA7E },
-    { 0xBA80, 0xBAFE },
-    { 0xBB40, 0xBB7E },
-    { 0xBB80, 0xBBFE },
-    { 0xBC40, 0xBC7E },
-    { 0xBC80, 0xBCFE },
-    { 0xBD40, 0xBD7E },
-    { 0xBD80, 0xBDFE },
-    { 0xBE40, 0xBE7E },
-    { 0xBE80, 0xBEFE },
-    { 0xBF40, 0xBF7E },
-    { 0xBF80, 0xBFFE },
-    { 0xC040, 0xC07E },
-    { 0xC080, 0xC0FE },
-    { 0xC140, 0xC17E },
-    { 0xC180, 0xC1FE },
-    { 0xC240, 0xC27E },
-    { 0xC280, 0xC2FE },
-    { 0xC340, 0xC37E },
-    { 0xC380, 0xC3FE },
-    { 0xC440, 0xC47E },
-    { 0xC480, 0xC4FE },
-    { 0xC540, 0xC57E },
-    { 0xC580, 0xC5FE },
-    { 0xC640, 0xC67E },
-    { 0xC680, 0xC6FE },
-    { 0xC740, 0xC77E },
-    { 0xC780, 0xC7FE },
-    { 0xC840, 0xC87E },
-    { 0xC880, 0xC8FE },
-    { 0xC940, 0xC97E },
-    { 0xC980, 0xC9FE },
-    { 0xCA40, 0xCA7E },
-    { 0xCA80, 0xCAFE },
-    { 0xCB40, 0xCB7E },
-    { 0xCB80, 0xCBFE },
-    { 0xCC40, 0xCC7E },
-    { 0xCC80, 0xCCFE },
-    { 0xCD40, 0xCD7E },
-    { 0xCD80, 0xCDFE },
-    { 0xCE40, 0xCE7E },
-    { 0xCE80, 0xCEFE },
-    { 0xCF40, 0xCF7E },
-    { 0xCF80, 0xCFFE },
-    { 0xD040, 0xD07E },
-    { 0xD080, 0xD0FE },
-    { 0xD140, 0xD17E },
-    { 0xD180, 0xD1FE },
-    { 0xD240, 0xD27E },
-    { 0xD280, 0xD2FE },
-    { 0xD340, 0xD37E },
-    { 0xD380, 0xD3FE },
-    { 0xD440, 0xD47E },
-    { 0xD480, 0xD4FE },
-    { 0xD540, 0xD57E },
-    { 0xD580, 0xD5FE },
-    { 0xD640, 0xD67E },
-    { 0xD680, 0xD6FE },
-    { 0xD740, 0xD77E },
-    { 0xD780, 0xD7F9 },
-    { 0xD840, 0xD87E },
-    { 0xD880, 0xD8FE },
-    { 0xD940, 0xD97E },
-    { 0xD980, 0xD9FE },
-    { 0xDA40, 0xDA7E },
-    { 0xDA80, 0xDAFE },
-    { 0xDB40, 0xDB7E },
-    { 0xDB80, 0xDBFE },
-    { 0xDC40, 0xDC7E },
-    { 0xDC80, 0xDCFE },
-    { 0xDD40, 0xDD7E },
-    { 0xDD80, 0xDDFE },
-    { 0xDE40, 0xDE7E },
-    { 0xDE80, 0xDEFE },
-    { 0xDF40, 0xDF7E },
-    { 0xDF80, 0xDFFE },
-    { 0xE040, 0xE07E },
-    { 0xE080, 0xE0FE },
-    { 0xE140, 0xE17E },
-    { 0xE180, 0xE1FE },
-    { 0xE240, 0xE27E },
-    { 0xE280, 0xE2FE },
-    { 0xE340, 0xE37E },
-    { 0xE380, 0xE3FE },
-    { 0xE440, 0xE47E },
-    { 0xE480, 0xE4FE },
-    { 0xE540, 0xE57E },
-    { 0xE580, 0xE5FE },
-    { 0xE640, 0xE67E },
-    { 0xE680, 0xE6FE },
-    { 0xE740, 0xE77E },
-    { 0xE780, 0xE7FE },
-    { 0xE840, 0xE87E },
-    { 0xE880, 0xE8FE },
-    { 0xE940, 0xE97E },
-    { 0xE980, 0xE9FE },
-    { 0xEA40, 0xEA7E },
-    { 0xEA80, 0xEAFE },
-    { 0xEB40, 0xEB7E },
-    { 0xEB80, 0xEBFE },
-    { 0xEC40, 0xEC7E },
-    { 0xEC80, 0xECFE },
-    { 0xED40, 0xED7E },
-    { 0xED80, 0xEDFE },
-    { 0xEE40, 0xEE7E },
-    { 0xEE80, 0xEEFE },
-    { 0xEF40, 0xEF7E },
-    { 0xEF80, 0xEFFE },
-    { 0xF040, 0xF07E },
-    { 0xF080, 0xF0FE },
-    { 0xF140, 0xF17E },
-    { 0xF180, 0xF1FE },
-    { 0xF240, 0xF27E },
-    { 0xF280, 0xF2FE },
-    { 0xF340, 0xF37E },
-    { 0xF380, 0xF3FE },
-    { 0xF440, 0xF47E },
-    { 0xF480, 0xF4FE },
-    { 0xF540, 0xF57E },
-    { 0xF580, 0xF5FE },
-    { 0xF640, 0xF67E },
-    { 0xF680, 0xF6FE },
-    { 0xF740, 0xF77E },
-    { 0xF780, 0xF7FE },
-    { 0xF840, 0xF87E },
-    { 0xF880, 0xF8A0 },
-    { 0xF940, 0xF97E },
-    { 0xF980, 0xF9A0 },
-    { 0xFA40, 0xFA7E },
-    { 0xFA80, 0xFAA0 },
-    { 0xFB40, 0xFB7E },
-    { 0xFB80, 0xFBA0 },
-    { 0xFC40, 0xFC7E },
-    { 0xFC80, 0xFCA0 },
-    { 0xFD40, 0xFD7E },
-    { 0xFD80, 0xFDA0 },
-    { 0xFE40, 0xFE4F },
-};
-
-// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP949.TXT
-static const CharRange kEUCKRRanges[] = {
-    { 0x8141, 0x815A },
-    { 0x8161, 0x817A },
-    { 0x8181, 0x81FE },
-    { 0x8241, 0x825A },
-    { 0x8261, 0x827A },
-    { 0x8281, 0x82FE },
-    { 0x8341, 0x835A },
-    { 0x8361, 0x837A },
-    { 0x8381, 0x83FE },
-    { 0x8441, 0x845A },
-    { 0x8461, 0x847A },
-    { 0x8481, 0x84FE },
-    { 0x8541, 0x855A },
-    { 0x8561, 0x857A },
-    { 0x8581, 0x85FE },
-    { 0x8641, 0x865A },
-    { 0x8661, 0x867A },
-    { 0x8681, 0x86FE },
-    { 0x8741, 0x875A },
-    { 0x8761, 0x877A },
-    { 0x8781, 0x87FE },
-    { 0x8841, 0x885A },
-    { 0x8861, 0x887A },
-    { 0x8881, 0x88FE },
-    { 0x8941, 0x895A },
-    { 0x8961, 0x897A },
-    { 0x8981, 0x89FE },
-    { 0x8A41, 0x8A5A },
-    { 0x8A61, 0x8A7A },
-    { 0x8A81, 0x8AFE },
-    { 0x8B41, 0x8B5A },
-    { 0x8B61, 0x8B7A },
-    { 0x8B81, 0x8BFE },
-    { 0x8C41, 0x8C5A },
-    { 0x8C61, 0x8C7A },
-    { 0x8C81, 0x8CFE },
-    { 0x8D41, 0x8D5A },
-    { 0x8D61, 0x8D7A },
-    { 0x8D81, 0x8DFE },
-    { 0x8E41, 0x8E5A },
-    { 0x8E61, 0x8E7A },
-    { 0x8E81, 0x8EFE },
-    { 0x8F41, 0x8F5A },
-    { 0x8F61, 0x8F7A },
-    { 0x8F81, 0x8FFE },
-    { 0x9041, 0x905A },
-    { 0x9061, 0x907A },
-    { 0x9081, 0x90FE },
-    { 0x9141, 0x915A },
-    { 0x9161, 0x917A },
-    { 0x9181, 0x91FE },
-    { 0x9241, 0x925A },
-    { 0x9261, 0x927A },
-    { 0x9281, 0x92FE },
-    { 0x9341, 0x935A },
-    { 0x9361, 0x937A },
-    { 0x9381, 0x93FE },
-    { 0x9441, 0x945A },
-    { 0x9461, 0x947A },
-    { 0x9481, 0x94FE },
-    { 0x9541, 0x955A },
-    { 0x9561, 0x957A },
-    { 0x9581, 0x95FE },
-    { 0x9641, 0x965A },
-    { 0x9661, 0x967A },
-    { 0x9681, 0x96FE },
-    { 0x9741, 0x975A },
-    { 0x9761, 0x977A },
-    { 0x9781, 0x97FE },
-    { 0x9841, 0x985A },
-    { 0x9861, 0x987A },
-    { 0x9881, 0x98FE },
-    { 0x9941, 0x995A },
-    { 0x9961, 0x997A },
-    { 0x9981, 0x99FE },
-    { 0x9A41, 0x9A5A },
-    { 0x9A61, 0x9A7A },
-    { 0x9A81, 0x9AFE },
-    { 0x9B41, 0x9B5A },
-    { 0x9B61, 0x9B7A },
-    { 0x9B81, 0x9BFE },
-    { 0x9C41, 0x9C5A },
-    { 0x9C61, 0x9C7A },
-    { 0x9C81, 0x9CFE },
-    { 0x9D41, 0x9D5A },
-    { 0x9D61, 0x9D7A },
-    { 0x9D81, 0x9DFE },
-    { 0x9E41, 0x9E5A },
-    { 0x9E61, 0x9E7A },
-    { 0x9E81, 0x9EFE },
-    { 0x9F41, 0x9F5A },
-    { 0x9F61, 0x9F7A },
-    { 0x9F81, 0x9FFE },
-    { 0xA041, 0xA05A },
-    { 0xA061, 0xA07A },
-    { 0xA081, 0xA0FE },
-    { 0xA141, 0xA15A },
-    { 0xA161, 0xA17A },
-    { 0xA181, 0xA1FE },
-    { 0xA241, 0xA25A },
-    { 0xA261, 0xA27A },
-    { 0xA281, 0xA2E7 },
-    { 0xA341, 0xA35A },
-    { 0xA361, 0xA37A },
-    { 0xA381, 0xA3FE },
-    { 0xA441, 0xA45A },
-    { 0xA461, 0xA47A },
-    { 0xA481, 0xA4FE },
-    { 0xA541, 0xA55A },
-    { 0xA561, 0xA57A },
-    { 0xA581, 0xA5AA },
-    { 0xA5B0, 0xA5B9 },
-    { 0xA5C1, 0xA5D8 },
-    { 0xA5E1, 0xA5F8 },
-    { 0xA641, 0xA65A },
-    { 0xA661, 0xA67A },
-    { 0xA681, 0xA6E4 },
-    { 0xA741, 0xA75A },
-    { 0xA761, 0xA77A },
-    { 0xA781, 0xA7EF },
-    { 0xA841, 0xA85A },
-    { 0xA861, 0xA87A },
-    { 0xA881, 0xA8A4 },
-    { 0xA8A6, 0xA8A6 },
-    { 0xA8A8, 0xA8AF },
-    { 0xA8B1, 0xA8FE },
-    { 0xA941, 0xA95A },
-    { 0xA961, 0xA97A },
-    { 0xA981, 0xA9FE },
-    { 0xAA41, 0xAA5A },
-    { 0xAA61, 0xAA7A },
-    { 0xAA81, 0xAAF3 },
-    { 0xAB41, 0xAB5A },
-    { 0xAB61, 0xAB7A },
-    { 0xAB81, 0xABF6 },
-    { 0xAC41, 0xAC5A },
-    { 0xAC61, 0xAC7A },
-    { 0xAC81, 0xACC1 },
-    { 0xACD1, 0xACF1 },
-    { 0xAD41, 0xAD5A },
-    { 0xAD61, 0xAD7A },
-    { 0xAD81, 0xADA0 },
-    { 0xAE41, 0xAE5A },
-    { 0xAE61, 0xAE7A },
-    { 0xAE81, 0xAEA0 },
-    { 0xAF41, 0xAF5A },
-    { 0xAF61, 0xAF7A },
-    { 0xAF81, 0xAFA0 },
-    { 0xB041, 0xB05A },
-    { 0xB061, 0xB07A },
-    { 0xB081, 0xB0FE },
-    { 0xB141, 0xB15A },
-    { 0xB161, 0xB17A },
-    { 0xB181, 0xB1FE },
-    { 0xB241, 0xB25A },
-    { 0xB261, 0xB27A },
-    { 0xB281, 0xB2FE },
-    { 0xB341, 0xB35A },
-    { 0xB361, 0xB37A },
-    { 0xB381, 0xB3FE },
-    { 0xB441, 0xB45A },
-    { 0xB461, 0xB47A },
-    { 0xB481, 0xB4FE },
-    { 0xB541, 0xB55A },
-    { 0xB561, 0xB57A },
-    { 0xB581, 0xB5FE },
-    { 0xB641, 0xB65A },
-    { 0xB661, 0xB67A },
-    { 0xB681, 0xB6FE },
-    { 0xB741, 0xB75A },
-    { 0xB761, 0xB77A },
-    { 0xB781, 0xB7FE },
-    { 0xB841, 0xB85A },
-    { 0xB861, 0xB87A },
-    { 0xB881, 0xB8FE },
-    { 0xB941, 0xB95A },
-    { 0xB961, 0xB97A },
-    { 0xB981, 0xB9FE },
-    { 0xBA41, 0xBA5A },
-    { 0xBA61, 0xBA7A },
-    { 0xBA81, 0xBAFE },
-    { 0xBB41, 0xBB5A },
-    { 0xBB61, 0xBB7A },
-    { 0xBB81, 0xBBFE },
-    { 0xBC41, 0xBC5A },
-    { 0xBC61, 0xBC7A },
-    { 0xBC81, 0xBCFE },
-    { 0xBD41, 0xBD5A },
-    { 0xBD61, 0xBD7A },
-    { 0xBD81, 0xBDFE },
-    { 0xBE41, 0xBE5A },
-    { 0xBE61, 0xBE7A },
-    { 0xBE81, 0xBEFE },
-    { 0xBF41, 0xBF5A },
-    { 0xBF61, 0xBF7A },
-    { 0xBF81, 0xBFFE },
-    { 0xC041, 0xC05A },
-    { 0xC061, 0xC07A },
-    { 0xC081, 0xC0FE },
-    { 0xC141, 0xC15A },
-    { 0xC161, 0xC17A },
-    { 0xC181, 0xC1FE },
-    { 0xC241, 0xC25A },
-    { 0xC261, 0xC27A },
-    { 0xC281, 0xC2FE },
-    { 0xC341, 0xC35A },
-    { 0xC361, 0xC37A },
-    { 0xC381, 0xC3FE },
-    { 0xC441, 0xC45A },
-    { 0xC461, 0xC47A },
-    { 0xC481, 0xC4FE },
-    { 0xC541, 0xC55A },
-    { 0xC561, 0xC57A },
-    { 0xC581, 0xC5FE },
-    { 0xC641, 0xC652 },
-    { 0xC6A1, 0xC6FE },
-    { 0xC7A1, 0xC7FE },
-    { 0xC8A1, 0xC8FE },
-    { 0xCAA1, 0xCAFE },
-    { 0xCBA1, 0xCBFE },
-    { 0xCCA1, 0xCCFE },
-    { 0xCDA1, 0xCDFE },
-    { 0xCEA1, 0xCEFE },
-    { 0xCFA1, 0xCFFE },
-    { 0xD0A1, 0xD0FE },
-    { 0xD1A1, 0xD1FE },
-    { 0xD2A1, 0xD2FE },
-    { 0xD3A1, 0xD3FE },
-    { 0xD4A1, 0xD4FE },
-    { 0xD5A1, 0xD5FE },
-    { 0xD6A1, 0xD6FE },
-    { 0xD7A1, 0xD7FE },
-    { 0xD8A1, 0xD8FE },
-    { 0xD9A1, 0xD9FE },
-    { 0xDAA1, 0xDAFE },
-    { 0xDBA1, 0xDBFE },
-    { 0xDCA1, 0xDCFE },
-    { 0xDDA1, 0xDDFE },
-    { 0xDEA1, 0xDEFE },
-    { 0xDFA1, 0xDFFE },
-    { 0xE0A1, 0xE0FE },
-    { 0xE1A1, 0xE1FE },
-    { 0xE2A1, 0xE2FE },
-    { 0xE3A1, 0xE3FE },
-    { 0xE4A1, 0xE4FE },
-    { 0xE5A1, 0xE5FE },
-    { 0xE6A1, 0xE6FE },
-    { 0xE7A1, 0xE7FE },
-    { 0xE8A1, 0xE8FE },
-    { 0xE9A1, 0xE9FE },
-    { 0xEAA1, 0xEAFE },
-    { 0xEBA1, 0xEBFE },
-    { 0xECA1, 0xECFE },
-    { 0xEDA1, 0xEDFE },
-    { 0xEEA1, 0xEEFE },
-    { 0xEFA1, 0xEFFE },
-    { 0xF0A1, 0xF0FE },
-    { 0xF1A1, 0xF1FE },
-    { 0xF2A1, 0xF2FE },
-    { 0xF3A1, 0xF3FE },
-    { 0xF4A1, 0xF4FE },
-    { 0xF5A1, 0xF5FE },
-    { 0xF6A1, 0xF6FE },
-    { 0xF7A1, 0xF7FE },
-    { 0xF8A1, 0xF8FE },
-    { 0xF9A1, 0xF9FE },
-    { 0xFAA1, 0xFAFE },
-    { 0xFBA1, 0xFBFE },
-    { 0xFCA1, 0xFCFE },
-    { 0xFDA1, 0xFDFE },
-};
-
-// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP950.TXT
-static const CharRange kBig5Ranges[] = {
-    { 0xA140, 0xA17E },
-    { 0xA1A1, 0xA1FE },
-    { 0xA240, 0xA27E },
-    { 0xA2A1, 0xA2FE },
-    { 0xA340, 0xA37E },
-    { 0xA3A1, 0xA3BF },
-    { 0xA3E1, 0xA3E1 },
-    { 0xA440, 0xA47E },
-    { 0xA4A1, 0xA4FE },
-    { 0xA540, 0xA57E },
-    { 0xA5A1, 0xA5FE },
-    { 0xA640, 0xA67E },
-    { 0xA6A1, 0xA6FE },
-    { 0xA740, 0xA77E },
-    { 0xA7A1, 0xA7FE },
-    { 0xA840, 0xA87E },
-    { 0xA8A1, 0xA8FE },
-    { 0xA940, 0xA97E },
-    { 0xA9A1, 0xA9FE },
-    { 0xAA40, 0xAA7E },
-    { 0xAAA1, 0xAAFE },
-    { 0xAB40, 0xAB7E },
-    { 0xABA1, 0xABFE },
-    { 0xAC40, 0xAC7E },
-    { 0xACA1, 0xACFE },
-    { 0xAD40, 0xAD7E },
-    { 0xADA1, 0xADFE },
-    { 0xAE40, 0xAE7E },
-    { 0xAEA1, 0xAEFE },
-    { 0xAF40, 0xAF7E },
-    { 0xAFA1, 0xAFFE },
-    { 0xB040, 0xB07E },
-    { 0xB0A1, 0xB0FE },
-    { 0xB140, 0xB17E },
-    { 0xB1A1, 0xB1FE },
-    { 0xB240, 0xB27E },
-    { 0xB2A1, 0xB2FE },
-    { 0xB340, 0xB37E },
-    { 0xB3A1, 0xB3FE },
-    { 0xB440, 0xB47E },
-    { 0xB4A1, 0xB4FE },
-    { 0xB540, 0xB57E },
-    { 0xB5A1, 0xB5FE },
-    { 0xB640, 0xB67E },
-    { 0xB6A1, 0xB6FE },
-    { 0xB740, 0xB77E },
-    { 0xB7A1, 0xB7FE },
-    { 0xB840, 0xB87E },
-    { 0xB8A1, 0xB8FE },
-    { 0xB940, 0xB97E },
-    { 0xB9A1, 0xB9FE },
-    { 0xBA40, 0xBA7E },
-    { 0xBAA1, 0xBAFE },
-    { 0xBB40, 0xBB7E },
-    { 0xBBA1, 0xBBFE },
-    { 0xBC40, 0xBC7E },
-    { 0xBCA1, 0xBCFE },
-    { 0xBD40, 0xBD7E },
-    { 0xBDA1, 0xBDFE },
-    { 0xBE40, 0xBE7E },
-    { 0xBEA1, 0xBEFE },
-    { 0xBF40, 0xBF7E },
-    { 0xBFA1, 0xBFFE },
-    { 0xC040, 0xC07E },
-    { 0xC0A1, 0xC0FE },
-    { 0xC140, 0xC17E },
-    { 0xC1A1, 0xC1FE },
-    { 0xC240, 0xC27E },
-    { 0xC2A1, 0xC2FE },
-    { 0xC340, 0xC37E },
-    { 0xC3A1, 0xC3FE },
-    { 0xC440, 0xC47E },
-    { 0xC4A1, 0xC4FE },
-    { 0xC540, 0xC57E },
-    { 0xC5A1, 0xC5FE },
-    { 0xC640, 0xC67E },
-    { 0xC940, 0xC97E },
-    { 0xC9A1, 0xC9FE },
-    { 0xCA40, 0xCA7E },
-    { 0xCAA1, 0xCAFE },
-    { 0xCB40, 0xCB7E },
-    { 0xCBA1, 0xCBFE },
-    { 0xCC40, 0xCC7E },
-    { 0xCCA1, 0xCCFE },
-    { 0xCD40, 0xCD7E },
-    { 0xCDA1, 0xCDFE },
-    { 0xCE40, 0xCE7E },
-    { 0xCEA1, 0xCEFE },
-    { 0xCF40, 0xCF7E },
-    { 0xCFA1, 0xCFFE },
-    { 0xD040, 0xD07E },
-    { 0xD0A1, 0xD0FE },
-    { 0xD140, 0xD17E },
-    { 0xD1A1, 0xD1FE },
-    { 0xD240, 0xD27E },
-    { 0xD2A1, 0xD2FE },
-    { 0xD340, 0xD37E },
-    { 0xD3A1, 0xD3FE },
-    { 0xD440, 0xD47E },
-    { 0xD4A1, 0xD4FE },
-    { 0xD540, 0xD57E },
-    { 0xD5A1, 0xD5FE },
-    { 0xD640, 0xD67E },
-    { 0xD6A1, 0xD6FE },
-    { 0xD740, 0xD77E },
-    { 0xD7A1, 0xD7FE },
-    { 0xD840, 0xD87E },
-    { 0xD8A1, 0xD8FE },
-    { 0xD940, 0xD97E },
-    { 0xD9A1, 0xD9FE },
-    { 0xDA40, 0xDA7E },
-    { 0xDAA1, 0xDAFE },
-    { 0xDB40, 0xDB7E },
-    { 0xDBA1, 0xDBFE },
-    { 0xDC40, 0xDC7E },
-    { 0xDCA1, 0xDCFE },
-    { 0xDD40, 0xDD7E },
-    { 0xDDA1, 0xDDFE },
-    { 0xDE40, 0xDE7E },
-    { 0xDEA1, 0xDEFE },
-    { 0xDF40, 0xDF7E },
-    { 0xDFA1, 0xDFFE },
-    { 0xE040, 0xE07E },
-    { 0xE0A1, 0xE0FE },
-    { 0xE140, 0xE17E },
-    { 0xE1A1, 0xE1FE },
-    { 0xE240, 0xE27E },
-    { 0xE2A1, 0xE2FE },
-    { 0xE340, 0xE37E },
-    { 0xE3A1, 0xE3FE },
-    { 0xE440, 0xE47E },
-    { 0xE4A1, 0xE4FE },
-    { 0xE540, 0xE57E },
-    { 0xE5A1, 0xE5FE },
-    { 0xE640, 0xE67E },
-    { 0xE6A1, 0xE6FE },
-    { 0xE740, 0xE77E },
-    { 0xE7A1, 0xE7FE },
-    { 0xE840, 0xE87E },
-    { 0xE8A1, 0xE8FE },
-    { 0xE940, 0xE97E },
-    { 0xE9A1, 0xE9FE },
-    { 0xEA40, 0xEA7E },
-    { 0xEAA1, 0xEAFE },
-    { 0xEB40, 0xEB7E },
-    { 0xEBA1, 0xEBFE },
-    { 0xEC40, 0xEC7E },
-    { 0xECA1, 0xECFE },
-    { 0xED40, 0xED7E },
-    { 0xEDA1, 0xEDFE },
-    { 0xEE40, 0xEE7E },
-    { 0xEEA1, 0xEEFE },
-    { 0xEF40, 0xEF7E },
-    { 0xEFA1, 0xEFFE },
-    { 0xF040, 0xF07E },
-    { 0xF0A1, 0xF0FE },
-    { 0xF140, 0xF17E },
-    { 0xF1A1, 0xF1FE },
-    { 0xF240, 0xF27E },
-    { 0xF2A1, 0xF2FE },
-    { 0xF340, 0xF37E },
-    { 0xF3A1, 0xF3FE },
-    { 0xF440, 0xF47E },
-    { 0xF4A1, 0xF4FE },
-    { 0xF540, 0xF57E },
-    { 0xF5A1, 0xF5FE },
-    { 0xF640, 0xF67E },
-    { 0xF6A1, 0xF6FE },
-    { 0xF740, 0xF77E },
-    { 0xF7A1, 0xF7FE },
-    { 0xF840, 0xF87E },
-    { 0xF8A1, 0xF8FE },
-    { 0xF940, 0xF97E },
-    { 0xF9A1, 0xF9FE },
-};
-
-static bool charMatchesEncoding(int ch, const CharRange* encodingRanges, int rangeCount) {
-    // Use binary search to see if the character is contained in the encoding
-    int low = 0;
-    int high = rangeCount;
-
-    while (low < high) {
-        int i = (low + high) / 2;
-        const CharRange* range = &encodingRanges[i];
-        if (ch >= range->first && ch <= range->last)
-            return true;
-        if (ch > range->last)
-            low = i + 1;
-        else
-            high = i;
-    }
-
-    return false;
-}
-
-extern uint32_t findPossibleEncodings(int ch)
-{
-    // ASCII matches everything
-    if (ch < 256) return kEncodingAll;
-
-    int result = kEncodingNone;
-
-    if (charMatchesEncoding(ch, kShiftJISRanges, ARRAY_SIZE(kShiftJISRanges)))
-        result |= kEncodingShiftJIS;
-    if (charMatchesEncoding(ch, kGBKRanges, ARRAY_SIZE(kGBKRanges)))
-        result |= kEncodingGBK;
-    if (charMatchesEncoding(ch, kBig5Ranges, ARRAY_SIZE(kBig5Ranges)))
-        result |= kEncodingBig5;
-    if (charMatchesEncoding(ch, kEUCKRRanges, ARRAY_SIZE(kEUCKRRanges)))
-        result |= kEncodingEUCKR;
-
-    return result;
-}
diff --git a/media/libmedia/autodetect.h b/media/libmedia/autodetect.h
deleted file mode 100644
index 9675db3..0000000
--- a/media/libmedia/autodetect.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AUTODETECT_H
-#define AUTODETECT_H
-
-#include <inttypes.h>
-
-// flags used for native encoding detection
-enum {
-    kEncodingNone               = 0,
-    kEncodingShiftJIS           = (1 << 0),
-    kEncodingGBK                = (1 << 1),
-    kEncodingBig5               = (1 << 2),
-    kEncodingEUCKR              = (1 << 3),
-
-    kEncodingAll                = (kEncodingShiftJIS | kEncodingGBK | kEncodingBig5 | kEncodingEUCKR),
-};
-
-
-// returns a bitfield containing the possible native encodings for the given character
-extern uint32_t findPossibleEncodings(int ch);
-
-#endif // AUTODETECT_H
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 110b94c..1d6bb6f 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -21,6 +21,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/IPCThreadState.h>
 #include <media/mediametadataretriever.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <utils/Log.h>
 #include <dlfcn.h>
@@ -93,7 +94,9 @@
 }
 
 status_t MediaMetadataRetriever::setDataSource(
-        const char *srcUrl, const KeyedVector<String8, String8> *headers)
+        const sp<IMediaHTTPService> &httpService,
+        const char *srcUrl,
+        const KeyedVector<String8, String8> *headers)
 {
     ALOGV("setDataSource");
     Mutex::Autolock _l(mLock);
@@ -106,7 +109,7 @@
         return UNKNOWN_ERROR;
     }
     ALOGV("data source (%s)", srcUrl);
-    return mRetriever->setDataSource(srcUrl, headers);
+    return mRetriever->setDataSource(httpService, srcUrl, headers);
 }
 
 status_t MediaMetadataRetriever::setDataSource(int fd, int64_t offset, int64_t length)
@@ -157,7 +160,7 @@
     return mRetriever->extractAlbumArt();
 }
 
-void MediaMetadataRetriever::DeathNotifier::binderDied(const wp<IBinder>& who) {
+void MediaMetadataRetriever::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
     Mutex::Autolock lock(MediaMetadataRetriever::sServiceLock);
     MediaMetadataRetriever::sService.clear();
     ALOGW("MediaMetadataRetriever server died!");
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 7a6f31d..0be01a9 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -58,7 +58,7 @@
     mVideoWidth = mVideoHeight = 0;
     mLockThreadId = 0;
     mAudioSessionId = AudioSystem::newAudioSessionId();
-    AudioSystem::acquireAudioSessionId(mAudioSessionId);
+    AudioSystem::acquireAudioSessionId(mAudioSessionId, -1);
     mSendLevel = 0;
     mRetransmitEndpointValid = false;
 }
@@ -66,7 +66,7 @@
 MediaPlayer::~MediaPlayer()
 {
     ALOGV("destructor");
-    AudioSystem::releaseAudioSessionId(mAudioSessionId);
+    AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
     disconnect();
     IPCThreadState::self()->flushCommands();
 }
@@ -136,6 +136,7 @@
 }
 
 status_t MediaPlayer::setDataSource(
+        const sp<IMediaHTTPService> &httpService,
         const char *url, const KeyedVector<String8, String8> *headers)
 {
     ALOGV("setDataSource(%s)", url);
@@ -145,7 +146,7 @@
         if (service != 0) {
             sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
             if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
-                (NO_ERROR != player->setDataSource(url, headers))) {
+                (NO_ERROR != player->setDataSource(httpService, url, headers))) {
                 player.clear();
             }
             err = attachNewPlayer(player);
@@ -530,6 +531,14 @@
     return OK;
 }
 
+status_t MediaPlayer::getAudioStreamType(audio_stream_type_t *type)
+{
+    ALOGV("getAudioStreamType");
+    Mutex::Autolock _l(mLock);
+    *type = mStreamType;
+    return OK;
+}
+
 status_t MediaPlayer::setLooping(int loop)
 {
     ALOGV("MediaPlayer::setLooping");
@@ -575,8 +584,8 @@
         return BAD_VALUE;
     }
     if (sessionId != mAudioSessionId) {
-        AudioSystem::acquireAudioSessionId(sessionId);
-        AudioSystem::releaseAudioSessionId(mAudioSessionId);
+        AudioSystem::acquireAudioSessionId(sessionId, -1);
+        AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
         mAudioSessionId = sessionId;
     }
     return NO_ERROR;
@@ -776,15 +785,20 @@
     }
 }
 
-/*static*/ status_t MediaPlayer::decode(const char* url, uint32_t *pSampleRate,
-                                           int* pNumChannels, audio_format_t* pFormat,
-                                           const sp<IMemoryHeap>& heap, size_t *pSize)
+/*static*/ status_t MediaPlayer::decode(
+        const sp<IMediaHTTPService> &httpService,
+        const char* url,
+        uint32_t *pSampleRate,
+        int* pNumChannels,
+        audio_format_t* pFormat,
+        const sp<IMemoryHeap>& heap,
+        size_t *pSize)
 {
     ALOGV("decode(%s)", url);
     status_t status;
     const sp<IMediaPlayerService>& service = getMediaPlayerService();
     if (service != 0) {
-        status = service->decode(url, pSampleRate, pNumChannels, pFormat, heap, pSize);
+        status = service->decode(httpService, url, pSampleRate, pNumChannels, pFormat, heap, pSize);
     } else {
         ALOGE("Unable to locate media service");
         status = DEAD_OBJECT;
@@ -832,15 +846,4 @@
     return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
 }
 
-status_t MediaPlayer::updateProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    const sp<IMediaPlayerService>& service = getMediaPlayerService();
-
-    if (service != NULL) {
-        return service->updateProxyConfig(host, port, exclusionList);
-    }
-
-    return INVALID_OPERATION;
-}
-
 }; // namespace android
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 85c9464..caf2dfc 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -45,7 +45,6 @@
     libstagefright_rtsp         \
 
 LOCAL_C_INCLUDES :=                                                 \
-    $(call include-path-for, graphics corecg)                       \
     $(TOP)/frameworks/av/media/libstagefright/include               \
     $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
     $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp
index c2ac1a3..afe3936 100644
--- a/media/libmediaplayerservice/HDCP.cpp
+++ b/media/libmediaplayerservice/HDCP.cpp
@@ -107,11 +107,7 @@
         return NO_INIT;
     }
 
-    // TO-DO:
-    // Only support HDCP_CAPS_ENCRYPT (byte-array to byte-array) for now.
-    // use mHDCPModule->getCaps() when the HDCP libraries get updated.
-    //return mHDCPModule->getCaps();
-    return HDCPModule::HDCP_CAPS_ENCRYPT;
+    return mHDCPModule->getCaps();
 }
 
 status_t HDCP::encrypt(
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 90aed39..74e5013 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -67,6 +67,12 @@
         return NU_PLAYER;
     }
 
+    // TODO: remove this EXPERIMENTAL developer settings property
+    if (property_get("persist.sys.media.use-nuplayer", value, NULL)
+            && !strcasecmp("true", value)) {
+        return NU_PLAYER;
+    }
+
     return STAGEFRIGHT_PLAYER;
 }
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index a392b76..778eb9a 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -44,6 +44,7 @@
 #include <utils/SystemClock.h>
 #include <utils/Vector.h>
 
+#include <media/IMediaHTTPService.h>
 #include <media/IRemoteDisplay.h>
 #include <media/IRemoteDisplayClient.h>
 #include <media/MediaPlayerInterface.h>
@@ -306,11 +307,6 @@
     return new RemoteDisplay(client, iface.string());
 }
 
-status_t MediaPlayerService::updateProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    return HTTPBase::UpdateProxyConfig(host, port, exclusionList);
-}
-
 status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& args) const
 {
     const size_t SIZE = 256;
@@ -590,7 +586,8 @@
     }
 
     if (!p->hardwareOutput()) {
-        mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid());
+        mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
+                mPid);
         static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
     }
 
@@ -622,7 +619,9 @@
 }
 
 status_t MediaPlayerService::Client::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers)
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers)
 {
     ALOGV("setDataSource(%s)", url);
     if (url == NULL)
@@ -657,7 +656,7 @@
             return NO_INIT;
         }
 
-        setDataSource_post(p, p->setDataSource(url, headers));
+        setDataSource_post(p, p->setDataSource(httpService, url, headers));
         return mStatus;
     }
 }
@@ -1176,9 +1175,14 @@
 }
 #endif
 
-status_t MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
-                                       audio_format_t* pFormat,
-                                       const sp<IMemoryHeap>& heap, size_t *pSize)
+status_t MediaPlayerService::decode(
+        const sp<IMediaHTTPService> &httpService,
+        const char* url,
+        uint32_t *pSampleRate,
+        int* pNumChannels,
+        audio_format_t* pFormat,
+        const sp<IMemoryHeap>& heap,
+        size_t *pSize)
 {
     ALOGV("decode(%s)", url);
     sp<MediaPlayerBase> player;
@@ -1206,7 +1210,7 @@
     static_cast<MediaPlayerInterface*>(player.get())->setAudioSink(cache);
 
     // set data source
-    if (player->setDataSource(url) != NO_ERROR) goto Exit;
+    if (player->setDataSource(httpService, url) != NO_ERROR) goto Exit;
 
     ALOGV("prepare");
     player->prepareAsync();
@@ -1296,13 +1300,14 @@
 
 #undef LOG_TAG
 #define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(int sessionId, int uid)
+MediaPlayerService::AudioOutput::AudioOutput(int sessionId, int uid, int pid)
     : mCallback(NULL),
       mCallbackCookie(NULL),
       mCallbackData(NULL),
       mBytesWritten(0),
       mSessionId(sessionId),
       mUid(uid),
+      mPid(pid),
       mFlags(AUDIO_OUTPUT_FLAG_NONE) {
     ALOGV("AudioOutput(%d)", sessionId);
     mStreamType = AUDIO_STREAM_MUSIC;
@@ -1450,7 +1455,7 @@
                 format, bufferCount, mSessionId, flags);
     uint32_t afSampleRate;
     size_t afFrameCount;
-    uint32_t frameCount;
+    size_t frameCount;
 
     // offloading is only supported in callback mode for now.
     // offloadInfo must be present if offload flag is set
@@ -1551,7 +1556,8 @@
                     mSessionId,
                     AudioTrack::TRANSFER_CALLBACK,
                     offloadInfo,
-                    mUid);
+                    mUid,
+                    mPid);
         } else {
             t = new AudioTrack(
                     mStreamType,
@@ -1566,7 +1572,8 @@
                     mSessionId,
                     AudioTrack::TRANSFER_DEFAULT,
                     NULL, // offload info
-                    mUid);
+                    mUid,
+                    mPid);
         }
 
         if ((t == 0) || (t->initCheck() != NO_ERROR)) {
@@ -1672,7 +1679,7 @@
 
 ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size)
 {
-    LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+    LOG_ALWAYS_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
     if (mTrack != 0) {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 9c084e1..448f27a 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -72,7 +72,7 @@
         class CallbackData;
 
      public:
-                                AudioOutput(int sessionId, int uid);
+                                AudioOutput(int sessionId, int uid, int pid);
         virtual                 ~AudioOutput();
 
         virtual bool            ready() const { return mTrack != 0; }
@@ -140,6 +140,7 @@
         float                   mMsecsPerFrame;
         int                     mSessionId;
         int                     mUid;
+        int                     mPid;
         float                   mSendLevel;
         int                     mAuxEffectId;
         static bool             mIsOnEmulator;
@@ -211,12 +212,12 @@
         virtual void            flush() {}
         virtual void            pause() {}
         virtual void            close() {}
-                void            setAudioStreamType(audio_stream_type_t streamType) {}
+                void            setAudioStreamType(audio_stream_type_t streamType __unused) {}
                 // stream type is not used for AudioCache
         virtual audio_stream_type_t getAudioStreamType() const { return AUDIO_STREAM_DEFAULT; }
 
-                void            setVolume(float left, float right) {}
-        virtual status_t        setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
+                void            setVolume(float left __unused, float right __unused) {}
+        virtual status_t        setPlaybackRatePermille(int32_t ratePermille __unused) { return INVALID_OPERATION; }
                 uint32_t        sampleRate() const { return mSampleRate; }
                 audio_format_t  format() const { return mFormat; }
                 size_t          size() const { return mSize; }
@@ -256,9 +257,15 @@
 
     virtual sp<IMediaPlayer>    create(const sp<IMediaPlayerClient>& client, int audioSessionId);
 
-    virtual status_t            decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
-                                       audio_format_t* pFormat,
-                                       const sp<IMemoryHeap>& heap, size_t *pSize);
+    virtual status_t            decode(
+            const sp<IMediaHTTPService> &httpService,
+            const char* url,
+            uint32_t *pSampleRate,
+            int* pNumChannels,
+            audio_format_t* pFormat,
+            const sp<IMemoryHeap>& heap,
+            size_t *pSize);
+
     virtual status_t            decode(int fd, int64_t offset, int64_t length,
                                        uint32_t *pSampleRate, int* pNumChannels,
                                        audio_format_t* pFormat,
@@ -272,9 +279,6 @@
             const String8& iface);
     virtual status_t            dump(int fd, const Vector<String16>& args);
 
-    virtual status_t        updateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList);
-
             void                removeClient(wp<Client> client);
 
     // For battery usage tracking purpose
@@ -356,6 +360,7 @@
         sp<MediaPlayerBase>     createPlayer(player_type playerType);
 
         virtual status_t        setDataSource(
+                        const sp<IMediaHTTPService> &httpService,
                         const char *url,
                         const KeyedVector<String8, String8> *headers);
 
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 348957f..c61cf89 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -31,6 +31,7 @@
 #include <binder/MemoryHeapBase.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
+#include <media/IMediaHTTPService.h>
 #include <media/MediaMetadataRetrieverInterface.h>
 #include <media/MediaPlayerInterface.h>
 #include <private/media/VideoFrame.h>
@@ -106,7 +107,9 @@
 }
 
 status_t MetadataRetrieverClient::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers)
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers)
 {
     ALOGV("setDataSource(%s)", url);
     Mutex::Autolock lock(mLock);
@@ -127,7 +130,7 @@
     ALOGV("player type = %d", playerType);
     sp<MediaMetadataRetrieverBase> p = createRetriever(playerType);
     if (p == NULL) return NO_INIT;
-    status_t ret = p->setDataSource(url, headers);
+    status_t ret = p->setDataSource(httpService, url, headers);
     if (ret == NO_ERROR) mRetriever = p;
     return ret;
 }
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index f08f933..9d3fbe9 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -30,6 +30,7 @@
 
 namespace android {
 
+struct IMediaHTTPService;
 class IMediaPlayerService;
 class MemoryDealer;
 
@@ -43,7 +44,9 @@
     virtual void                    disconnect();
 
     virtual status_t                setDataSource(
-            const char *url, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char *url,
+            const KeyedVector<String8, String8> *headers);
 
     virtual status_t                setDataSource(int fd, int64_t offset, int64_t length);
     virtual sp<IMemory>             getFrameAtTime(int64_t timeUs, int option);
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index 0a6aa90..deeddd1 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -114,7 +114,9 @@
 }
 
 status_t MidiFile::setDataSource(
-        const char* path, const KeyedVector<String8, String8> *) {
+        const sp<IMediaHTTPService> &httpService,
+        const char* path,
+        const KeyedVector<String8, String8> *) {
     ALOGV("MidiFile::setDataSource url=%s", path);
     Mutex::Autolock lock(mMutex);
 
diff --git a/media/libmediaplayerservice/MidiFile.h b/media/libmediaplayerservice/MidiFile.h
index 24d59b4..12802ba 100644
--- a/media/libmediaplayerservice/MidiFile.h
+++ b/media/libmediaplayerservice/MidiFile.h
@@ -32,7 +32,9 @@
     virtual status_t    initCheck();
 
     virtual status_t    setDataSource(
-            const char* path, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char* path,
+            const KeyedVector<String8, String8> *headers);
 
     virtual status_t    setDataSource(int fd, int64_t offset, int64_t length);
     virtual status_t    setVideoSurfaceTexture(
diff --git a/media/libmediaplayerservice/MidiMetadataRetriever.cpp b/media/libmediaplayerservice/MidiMetadataRetriever.cpp
index 465209f..f3cf6ef 100644
--- a/media/libmediaplayerservice/MidiMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/MidiMetadataRetriever.cpp
@@ -22,6 +22,8 @@
 #include "MidiMetadataRetriever.h"
 #include <media/mediametadataretriever.h>
 
+#include <media/IMediaHTTPService.h>
+
 namespace android {
 
 static status_t ERROR_NOT_OPEN = -1;
@@ -36,7 +38,9 @@
 }
 
 status_t MidiMetadataRetriever::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers)
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers)
 {
     ALOGV("setDataSource: %s", url? url: "NULL pointer");
     Mutex::Autolock lock(mLock);
@@ -44,7 +48,7 @@
     if (mMidiPlayer == 0) {
         mMidiPlayer = new MidiFile();
     }
-    return mMidiPlayer->setDataSource(url, headers);
+    return mMidiPlayer->setDataSource(httpService, url, headers);
 }
 
 status_t MidiMetadataRetriever::setDataSource(int fd, int64_t offset, int64_t length)
diff --git a/media/libmediaplayerservice/MidiMetadataRetriever.h b/media/libmediaplayerservice/MidiMetadataRetriever.h
index 4cee42d..b8214ee 100644
--- a/media/libmediaplayerservice/MidiMetadataRetriever.h
+++ b/media/libmediaplayerservice/MidiMetadataRetriever.h
@@ -32,7 +32,9 @@
                                    ~MidiMetadataRetriever() {}
 
     virtual status_t                setDataSource(
-            const char *url, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char *url,
+            const KeyedVector<String8, String8> *headers);
 
     virtual status_t                setDataSource(int fd, int64_t offset, int64_t length);
     virtual const char*             extractMetadata(int keyCode);
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
index 42b7766..b37aee3 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ b/media/libmediaplayerservice/StagefrightPlayer.cpp
@@ -54,8 +54,10 @@
 }
 
 status_t StagefrightPlayer::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers) {
-    return mPlayer->setDataSource(url, headers);
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    return mPlayer->setDataSource(httpService, url, headers);
 }
 
 // Warning: The filedescriptor passed into this method will only be valid until
diff --git a/media/libmediaplayerservice/StagefrightPlayer.h b/media/libmediaplayerservice/StagefrightPlayer.h
index 600945e..e6c30ff 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.h
+++ b/media/libmediaplayerservice/StagefrightPlayer.h
@@ -34,7 +34,9 @@
     virtual status_t setUID(uid_t uid);
 
     virtual status_t setDataSource(
-            const char *url, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char *url,
+            const KeyedVector<String8, String8> *headers);
 
     virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 4da74e1..5b7a236 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -25,8 +25,10 @@
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaPlayerService.h>
-#include <media/openmax/OMX_Audio.h>
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/AudioSource.h>
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/AACWriter.h>
@@ -36,13 +38,12 @@
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/SurfaceMediaSource.h>
 #include <media/MediaProfiles.h>
 #include <camera/ICamera.h>
 #include <camera/CameraParameters.h>
-#include <gui/Surface.h>
 
 #include <utils/Errors.h>
 #include <sys/types.h>
@@ -72,8 +73,7 @@
       mAudioSource(AUDIO_SOURCE_CNT),
       mVideoSource(VIDEO_SOURCE_LIST_END),
       mCaptureTimeLapse(false),
-      mStarted(false),
-      mSurfaceMediaSource(NULL) {
+      mStarted(false) {
 
     ALOGV("Constructor");
     reset();
@@ -82,10 +82,19 @@
 StagefrightRecorder::~StagefrightRecorder() {
     ALOGV("Destructor");
     stop();
+
+    if (mLooper != NULL) {
+        mLooper->stop();
+    }
 }
 
 status_t StagefrightRecorder::init() {
     ALOGV("init");
+
+    mLooper = new ALooper;
+    mLooper->setName("recorder_looper");
+    mLooper->start();
+
     return OK;
 }
 
@@ -94,7 +103,7 @@
 // while encoding GL Frames
 sp<IGraphicBufferProducer> StagefrightRecorder::querySurfaceMediaSource() const {
     ALOGV("Get SurfaceMediaSource");
-    return mSurfaceMediaSource->getBufferQueue();
+    return mGraphicBufferProducer;
 }
 
 status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
@@ -234,7 +243,7 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setOutputFile(const char *path) {
+status_t StagefrightRecorder::setOutputFile(const char * /* path */) {
     ALOGE("setOutputFile(const char*) must not be called");
     // We don't actually support this at all, as the media_server process
     // no longer has permissions to create files.
@@ -681,10 +690,10 @@
             return setParamTimeLapseEnable(timeLapseEnable);
         }
     } else if (key == "time-between-time-lapse-frame-capture") {
-        int64_t timeBetweenTimeLapseFrameCaptureMs;
-        if (safe_strtoi64(value.string(), &timeBetweenTimeLapseFrameCaptureMs)) {
+        int64_t timeBetweenTimeLapseFrameCaptureUs;
+        if (safe_strtoi64(value.string(), &timeBetweenTimeLapseFrameCaptureUs)) {
             return setParamTimeBetweenTimeLapseFrameCapture(
-                    1000LL * timeBetweenTimeLapseFrameCaptureMs);
+                    timeBetweenTimeLapseFrameCaptureUs);
         }
     } else {
         ALOGE("setParameter: failed to find key %s", key.string());
@@ -739,19 +748,15 @@
     return OK;
 }
 
-status_t StagefrightRecorder::prepare() {
-    return OK;
-}
-
-status_t StagefrightRecorder::start() {
-    CHECK_GE(mOutputFd, 0);
+status_t StagefrightRecorder::prepareInternal() {
+    ALOGV("prepare");
+    if (mOutputFd < 0) {
+        ALOGE("Output file descriptor is invalid");
+        return INVALID_OPERATION;
+    }
 
     // Get UID here for permission checking
     mClientUid = IPCThreadState::self()->getCallingUid();
-    if (mWriter != NULL) {
-        ALOGE("File writer is not avaialble");
-        return UNKNOWN_ERROR;
-    }
 
     status_t status = OK;
 
@@ -759,25 +764,25 @@
         case OUTPUT_FORMAT_DEFAULT:
         case OUTPUT_FORMAT_THREE_GPP:
         case OUTPUT_FORMAT_MPEG_4:
-            status = startMPEG4Recording();
+            status = setupMPEG4Recording();
             break;
 
         case OUTPUT_FORMAT_AMR_NB:
         case OUTPUT_FORMAT_AMR_WB:
-            status = startAMRRecording();
+            status = setupAMRRecording();
             break;
 
         case OUTPUT_FORMAT_AAC_ADIF:
         case OUTPUT_FORMAT_AAC_ADTS:
-            status = startAACRecording();
+            status = setupAACRecording();
             break;
 
         case OUTPUT_FORMAT_RTP_AVP:
-            status = startRTPRecording();
+            status = setupRTPRecording();
             break;
 
         case OUTPUT_FORMAT_MPEG2TS:
-            status = startMPEG2TSRecording();
+            status = setupMPEG2TSRecording();
             break;
 
         default:
@@ -786,6 +791,72 @@
             break;
     }
 
+    return status;
+}
+
+status_t StagefrightRecorder::prepare() {
+    if (mVideoSource == VIDEO_SOURCE_SURFACE) {
+        return prepareInternal();
+    }
+    return OK;
+}
+
+status_t StagefrightRecorder::start() {
+    ALOGV("start");
+    if (mOutputFd < 0) {
+        ALOGE("Output file descriptor is invalid");
+        return INVALID_OPERATION;
+    }
+
+    status_t status = OK;
+
+    if (mVideoSource != VIDEO_SOURCE_SURFACE) {
+        status = prepareInternal();
+        if (status != OK) {
+            return status;
+        }
+    }
+
+    if (mWriter == NULL) {
+        ALOGE("File writer is not available");
+        return UNKNOWN_ERROR;
+    }
+
+    switch (mOutputFormat) {
+        case OUTPUT_FORMAT_DEFAULT:
+        case OUTPUT_FORMAT_THREE_GPP:
+        case OUTPUT_FORMAT_MPEG_4:
+        {
+            sp<MetaData> meta = new MetaData;
+            setupMPEG4MetaData(&meta);
+            status = mWriter->start(meta.get());
+            break;
+        }
+
+        case OUTPUT_FORMAT_AMR_NB:
+        case OUTPUT_FORMAT_AMR_WB:
+        case OUTPUT_FORMAT_AAC_ADIF:
+        case OUTPUT_FORMAT_AAC_ADTS:
+        case OUTPUT_FORMAT_RTP_AVP:
+        case OUTPUT_FORMAT_MPEG2TS:
+        {
+            status = mWriter->start();
+            break;
+        }
+
+        default:
+        {
+            ALOGE("Unsupported output file format: %d", mOutputFormat);
+            status = UNKNOWN_ERROR;
+            break;
+        }
+    }
+
+    if (status != OK) {
+        mWriter.clear();
+        mWriter = NULL;
+    }
+
     if ((status == OK) && (!mStarted)) {
         mStarted = true;
 
@@ -817,58 +888,54 @@
         return NULL;
     }
 
-    sp<MetaData> encMeta = new MetaData;
+    sp<AMessage> format = new AMessage;
     const char *mime;
     switch (mAudioEncoder) {
         case AUDIO_ENCODER_AMR_NB:
         case AUDIO_ENCODER_DEFAULT:
-            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
             break;
         case AUDIO_ENCODER_AMR_WB:
-            mime = MEDIA_MIMETYPE_AUDIO_AMR_WB;
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_WB);
             break;
         case AUDIO_ENCODER_AAC:
-            mime = MEDIA_MIMETYPE_AUDIO_AAC;
-            encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectLC);
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
+            format->setInt32("aac-profile", OMX_AUDIO_AACObjectLC);
             break;
         case AUDIO_ENCODER_HE_AAC:
-            mime = MEDIA_MIMETYPE_AUDIO_AAC;
-            encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectHE);
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
+            format->setInt32("aac-profile", OMX_AUDIO_AACObjectHE);
             break;
         case AUDIO_ENCODER_AAC_ELD:
-            mime = MEDIA_MIMETYPE_AUDIO_AAC;
-            encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectELD);
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
+            format->setInt32("aac-profile", OMX_AUDIO_AACObjectELD);
             break;
 
         default:
             ALOGE("Unknown audio encoder: %d", mAudioEncoder);
             return NULL;
     }
-    encMeta->setCString(kKeyMIMEType, mime);
 
     int32_t maxInputSize;
     CHECK(audioSource->getFormat()->findInt32(
                 kKeyMaxInputSize, &maxInputSize));
 
-    encMeta->setInt32(kKeyMaxInputSize, maxInputSize);
-    encMeta->setInt32(kKeyChannelCount, mAudioChannels);
-    encMeta->setInt32(kKeySampleRate, mSampleRate);
-    encMeta->setInt32(kKeyBitRate, mAudioBitRate);
+    format->setInt32("max-input-size", maxInputSize);
+    format->setInt32("channel-count", mAudioChannels);
+    format->setInt32("sample-rate", mSampleRate);
+    format->setInt32("bitrate", mAudioBitRate);
     if (mAudioTimeScale > 0) {
-        encMeta->setInt32(kKeyTimeScale, mAudioTimeScale);
+        format->setInt32("time-scale", mAudioTimeScale);
     }
 
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
     sp<MediaSource> audioEncoder =
-        OMXCodec::Create(client.interface(), encMeta,
-                         true /* createEncoder */, audioSource);
+            MediaCodecSource::Create(mLooper, format, audioSource);
     mAudioSourceNode = audioSource;
 
     return audioEncoder;
 }
 
-status_t StagefrightRecorder::startAACRecording() {
+status_t StagefrightRecorder::setupAACRecording() {
     // FIXME:
     // Add support for OUTPUT_FORMAT_AAC_ADIF
     CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS);
@@ -879,16 +946,10 @@
     CHECK(mAudioSource != AUDIO_SOURCE_CNT);
 
     mWriter = new AACWriter(mOutputFd);
-    status_t status = startRawAudioRecording();
-    if (status != OK) {
-        mWriter.clear();
-        mWriter = NULL;
-    }
-
-    return status;
+    return setupRawAudioRecording();
 }
 
-status_t StagefrightRecorder::startAMRRecording() {
+status_t StagefrightRecorder::setupAMRRecording() {
     CHECK(mOutputFormat == OUTPUT_FORMAT_AMR_NB ||
           mOutputFormat == OUTPUT_FORMAT_AMR_WB);
 
@@ -908,15 +969,10 @@
     }
 
     mWriter = new AMRWriter(mOutputFd);
-    status_t status = startRawAudioRecording();
-    if (status != OK) {
-        mWriter.clear();
-        mWriter = NULL;
-    }
-    return status;
+    return setupRawAudioRecording();
 }
 
-status_t StagefrightRecorder::startRawAudioRecording() {
+status_t StagefrightRecorder::setupRawAudioRecording() {
     if (mAudioSource >= AUDIO_SOURCE_CNT) {
         ALOGE("Invalid audio source: %d", mAudioSource);
         return BAD_VALUE;
@@ -942,12 +998,11 @@
         mWriter->setMaxFileSize(mMaxFileSizeBytes);
     }
     mWriter->setListener(mListener);
-    mWriter->start();
 
     return OK;
 }
 
-status_t StagefrightRecorder::startRTPRecording() {
+status_t StagefrightRecorder::setupRTPRecording() {
     CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_RTP_AVP);
 
     if ((mAudioSource != AUDIO_SOURCE_CNT
@@ -974,7 +1029,7 @@
             return err;
         }
 
-        err = setupVideoEncoder(mediaSource, mVideoBitRate, &source);
+        err = setupVideoEncoder(mediaSource, &source);
         if (err != OK) {
             return err;
         }
@@ -984,10 +1039,10 @@
     mWriter->addSource(source);
     mWriter->setListener(mListener);
 
-    return mWriter->start();
+    return OK;
 }
 
-status_t StagefrightRecorder::startMPEG2TSRecording() {
+status_t StagefrightRecorder::setupMPEG2TSRecording() {
     CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_MPEG2TS);
 
     sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);
@@ -1018,7 +1073,7 @@
         }
 
         sp<MediaSource> encoder;
-        err = setupVideoEncoder(mediaSource, mVideoBitRate, &encoder);
+        err = setupVideoEncoder(mediaSource, &encoder);
 
         if (err != OK) {
             return err;
@@ -1037,7 +1092,7 @@
 
     mWriter = writer;
 
-    return mWriter->start();
+    return OK;
 }
 
 void StagefrightRecorder::clipVideoFrameRate() {
@@ -1278,49 +1333,14 @@
             return err;
         }
         *mediaSource = cameraSource;
-    } else if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
-        // If using GRAlloc buffers, setup surfacemediasource.
-        // Later a handle to that will be passed
-        // to the client side when queried
-        status_t err = setupSurfaceMediaSource();
-        if (err != OK) {
-            return err;
-        }
-        *mediaSource = mSurfaceMediaSource;
+    } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
+        *mediaSource = NULL;
     } else {
         return INVALID_OPERATION;
     }
     return OK;
 }
 
-// setupSurfaceMediaSource creates a source with the given
-// width and height and framerate.
-// TODO: This could go in a static function inside SurfaceMediaSource
-// similar to that in CameraSource
-status_t StagefrightRecorder::setupSurfaceMediaSource() {
-    status_t err = OK;
-    mSurfaceMediaSource = new SurfaceMediaSource(mVideoWidth, mVideoHeight);
-    if (mSurfaceMediaSource == NULL) {
-        return NO_INIT;
-    }
-
-    if (mFrameRate == -1) {
-        int32_t frameRate = 0;
-        CHECK (mSurfaceMediaSource->getFormat()->findInt32(
-                                        kKeyFrameRate, &frameRate));
-        ALOGI("Frame rate is not explicitly set. Use the current frame "
-             "rate (%d fps)", frameRate);
-        mFrameRate = frameRate;
-    } else {
-        err = mSurfaceMediaSource->setFrameRate(mFrameRate);
-    }
-    CHECK(mFrameRate != -1);
-
-    mIsMetaDataStoredInVideoBuffers =
-        mSurfaceMediaSource->isMetaDataStoredInVideoBuffers();
-    return err;
-}
-
 status_t StagefrightRecorder::setupCameraSource(
         sp<CameraSource> *cameraSource) {
     status_t err = OK;
@@ -1384,25 +1404,22 @@
 
 status_t StagefrightRecorder::setupVideoEncoder(
         sp<MediaSource> cameraSource,
-        int32_t videoBitRate,
         sp<MediaSource> *source) {
     source->clear();
 
-    sp<MetaData> enc_meta = new MetaData;
-    enc_meta->setInt32(kKeyBitRate, videoBitRate);
-    enc_meta->setInt32(kKeyFrameRate, mFrameRate);
+    sp<AMessage> format = new AMessage();
 
     switch (mVideoEncoder) {
         case VIDEO_ENCODER_H263:
-            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
+            format->setString("mime", MEDIA_MIMETYPE_VIDEO_H263);
             break;
 
         case VIDEO_ENCODER_MPEG_4_SP:
-            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+            format->setString("mime", MEDIA_MIMETYPE_VIDEO_MPEG4);
             break;
 
         case VIDEO_ENCODER_H264:
-            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+            format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
             break;
 
         default:
@@ -1410,59 +1427,80 @@
             break;
     }
 
-    sp<MetaData> meta = cameraSource->getFormat();
+    if (cameraSource != NULL) {
+        sp<MetaData> meta = cameraSource->getFormat();
 
-    int32_t width, height, stride, sliceHeight, colorFormat;
-    CHECK(meta->findInt32(kKeyWidth, &width));
-    CHECK(meta->findInt32(kKeyHeight, &height));
-    CHECK(meta->findInt32(kKeyStride, &stride));
-    CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
-    CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
+        int32_t width, height, stride, sliceHeight, colorFormat;
+        CHECK(meta->findInt32(kKeyWidth, &width));
+        CHECK(meta->findInt32(kKeyHeight, &height));
+        CHECK(meta->findInt32(kKeyStride, &stride));
+        CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
+        CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
 
-    enc_meta->setInt32(kKeyWidth, width);
-    enc_meta->setInt32(kKeyHeight, height);
-    enc_meta->setInt32(kKeyIFramesInterval, mIFramesIntervalSec);
-    enc_meta->setInt32(kKeyStride, stride);
-    enc_meta->setInt32(kKeySliceHeight, sliceHeight);
-    enc_meta->setInt32(kKeyColorFormat, colorFormat);
+        format->setInt32("width", width);
+        format->setInt32("height", height);
+        format->setInt32("stride", stride);
+        format->setInt32("slice-height", sliceHeight);
+        format->setInt32("color-format", colorFormat);
+    } else {
+        format->setInt32("width", mVideoWidth);
+        format->setInt32("height", mVideoHeight);
+        format->setInt32("stride", mVideoWidth);
+        format->setInt32("slice-height", mVideoHeight);
+        format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+
+        // set up time lapse/slow motion for surface source
+        if (mCaptureTimeLapse) {
+            if (mTimeBetweenTimeLapseFrameCaptureUs <= 0) {
+                ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
+                    mTimeBetweenTimeLapseFrameCaptureUs);
+                return BAD_VALUE;
+            }
+            format->setInt64("time-lapse",
+                    mTimeBetweenTimeLapseFrameCaptureUs);
+        }
+    }
+
+    format->setInt32("bitrate", mVideoBitRate);
+    format->setInt32("frame-rate", mFrameRate);
+    format->setInt32("i-frame-interval", mIFramesIntervalSec);
+
     if (mVideoTimeScale > 0) {
-        enc_meta->setInt32(kKeyTimeScale, mVideoTimeScale);
+        format->setInt32("time-scale", mVideoTimeScale);
     }
     if (mVideoEncoderProfile != -1) {
-        enc_meta->setInt32(kKeyVideoProfile, mVideoEncoderProfile);
+        format->setInt32("profile", mVideoEncoderProfile);
     }
     if (mVideoEncoderLevel != -1) {
-        enc_meta->setInt32(kKeyVideoLevel, mVideoEncoderLevel);
+        format->setInt32("level", mVideoEncoderLevel);
     }
 
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
-
-    uint32_t encoder_flags = 0;
+    uint32_t flags = 0;
     if (mIsMetaDataStoredInVideoBuffers) {
-        encoder_flags |= OMXCodec::kStoreMetaDataInVideoBuffers;
+        flags |= MediaCodecSource::FLAG_USE_METADATA_INPUT;
     }
 
-    // Do not wait for all the input buffers to become available.
-    // This give timelapse video recording faster response in
-    // receiving output from video encoder component.
-    if (mCaptureTimeLapse) {
-        encoder_flags |= OMXCodec::kOnlySubmitOneInputBufferAtOneTime;
+    if (cameraSource == NULL) {
+        flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
     }
 
-    sp<MediaSource> encoder = OMXCodec::Create(
-            client.interface(), enc_meta,
-            true /* createEncoder */, cameraSource,
-            NULL, encoder_flags);
+    sp<MediaCodecSource> encoder =
+            MediaCodecSource::Create(mLooper, format, cameraSource, flags);
     if (encoder == NULL) {
         ALOGW("Failed to create the encoder");
         // When the encoder fails to be created, we need
         // release the camera source due to the camera's lock
         // and unlock mechanism.
-        cameraSource->stop();
+        if (cameraSource != NULL) {
+            cameraSource->stop();
+        }
         return UNKNOWN_ERROR;
     }
 
+    if (cameraSource == NULL) {
+        mGraphicBufferProducer = encoder->getGraphicBufferProducer();
+    }
+
     *source = encoder;
 
     return OK;
@@ -1496,16 +1534,12 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setupMPEG4Recording(
-        int outputFd,
-        int32_t videoWidth, int32_t videoHeight,
-        int32_t videoBitRate,
-        int32_t *totalBitRate,
-        sp<MediaWriter> *mediaWriter) {
-    mediaWriter->clear();
-    *totalBitRate = 0;
+status_t StagefrightRecorder::setupMPEG4Recording() {
+    mWriter.clear();
+    mTotalBitRate = 0;
+
     status_t err = OK;
-    sp<MediaWriter> writer = new MPEG4Writer(outputFd);
+    sp<MediaWriter> writer = new MPEG4Writer(mOutputFd);
 
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
 
@@ -1516,13 +1550,13 @@
         }
 
         sp<MediaSource> encoder;
-        err = setupVideoEncoder(mediaSource, videoBitRate, &encoder);
+        err = setupVideoEncoder(mediaSource, &encoder);
         if (err != OK) {
             return err;
         }
 
         writer->addSource(encoder);
-        *totalBitRate += videoBitRate;
+        mTotalBitRate += mVideoBitRate;
     }
 
     // Audio source is added at the end if it exists.
@@ -1531,7 +1565,7 @@
     if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
         err = setupAudioEncoder(writer);
         if (err != OK) return err;
-        *totalBitRate += mAudioBitRate;
+        mTotalBitRate += mAudioBitRate;
     }
 
     if (mInterleaveDurationUs > 0) {
@@ -1549,22 +1583,28 @@
         writer->setMaxFileSize(mMaxFileSizeBytes);
     }
 
-    mStartTimeOffsetMs = mEncoderProfiles->getStartTimeOffsetMs(mCameraId);
+    if (mVideoSource == VIDEO_SOURCE_DEFAULT
+            || mVideoSource == VIDEO_SOURCE_CAMERA) {
+        mStartTimeOffsetMs = mEncoderProfiles->getStartTimeOffsetMs(mCameraId);
+    } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
+        // surface source doesn't need large initial delay
+        mStartTimeOffsetMs = 200;
+    }
     if (mStartTimeOffsetMs > 0) {
         reinterpret_cast<MPEG4Writer *>(writer.get())->
             setStartTimeOffsetMs(mStartTimeOffsetMs);
     }
 
     writer->setListener(mListener);
-    *mediaWriter = writer;
+    mWriter = writer;
     return OK;
 }
 
-void StagefrightRecorder::setupMPEG4MetaData(int64_t startTimeUs, int32_t totalBitRate,
-        sp<MetaData> *meta) {
+void StagefrightRecorder::setupMPEG4MetaData(sp<MetaData> *meta) {
+    int64_t startTimeUs = systemTime() / 1000;
     (*meta)->setInt64(kKeyTime, startTimeUs);
     (*meta)->setInt32(kKeyFileType, mOutputFormat);
-    (*meta)->setInt32(kKeyBitRate, totalBitRate);
+    (*meta)->setInt32(kKeyBitRate, mTotalBitRate);
     (*meta)->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
     if (mMovieTimeScale > 0) {
         (*meta)->setInt32(kKeyTimeScale, mMovieTimeScale);
@@ -1577,27 +1617,6 @@
     }
 }
 
-status_t StagefrightRecorder::startMPEG4Recording() {
-    int32_t totalBitRate;
-    status_t err = setupMPEG4Recording(
-            mOutputFd, mVideoWidth, mVideoHeight,
-            mVideoBitRate, &totalBitRate, &mWriter);
-    if (err != OK) {
-        return err;
-    }
-
-    int64_t startTimeUs = systemTime() / 1000;
-    sp<MetaData> meta = new MetaData;
-    setupMPEG4MetaData(startTimeUs, totalBitRate, &meta);
-
-    err = mWriter->start(meta.get());
-    if (err != OK) {
-        return err;
-    }
-
-    return OK;
-}
-
 status_t StagefrightRecorder::pause() {
     ALOGV("pause");
     if (mWriter == NULL) {
@@ -1637,6 +1656,8 @@
         mWriter.clear();
     }
 
+    mGraphicBufferProducer.clear();
+
     if (mOutputFd >= 0) {
         ::close(mOutputFd);
         mOutputFd = -1;
@@ -1656,7 +1677,6 @@
         addBatteryData(params);
     }
 
-
     return err;
 }
 
@@ -1708,6 +1728,7 @@
     mRotationDegrees = 0;
     mLatitudex10000 = -3600000;
     mLongitudex10000 = -3600000;
+    mTotalBitRate = 0;
 
     mOutputFd = -1;
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 31f09e0..377d168 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -37,6 +37,7 @@
 class MediaProfiles;
 class IGraphicBufferProducer;
 class SurfaceMediaSource;
+class ALooper;
 
 struct StagefrightRecorder : public MediaRecorderBase {
     StagefrightRecorder();
@@ -106,6 +107,7 @@
     int32_t mLatitudex10000;
     int32_t mLongitudex10000;
     int32_t mStartTimeOffsetMs;
+    int32_t mTotalBitRate;
 
     bool mCaptureTimeLapse;
     int64_t mTimeBetweenTimeLapseFrameCaptureUs;
@@ -122,22 +124,17 @@
     // An <IGraphicBufferProducer> pointer
     // will be sent to the client side using which the
     // frame buffers will be queued and dequeued
-    sp<SurfaceMediaSource> mSurfaceMediaSource;
+    sp<IGraphicBufferProducer> mGraphicBufferProducer;
+    sp<ALooper> mLooper;
 
-    status_t setupMPEG4Recording(
-        int outputFd,
-        int32_t videoWidth, int32_t videoHeight,
-        int32_t videoBitRate,
-        int32_t *totalBitRate,
-        sp<MediaWriter> *mediaWriter);
-    void setupMPEG4MetaData(int64_t startTimeUs, int32_t totalBitRate,
-        sp<MetaData> *meta);
-    status_t startMPEG4Recording();
-    status_t startAMRRecording();
-    status_t startAACRecording();
-    status_t startRawAudioRecording();
-    status_t startRTPRecording();
-    status_t startMPEG2TSRecording();
+    status_t prepareInternal();
+    status_t setupMPEG4Recording();
+    void setupMPEG4MetaData(sp<MetaData> *meta);
+    status_t setupAMRRecording();
+    status_t setupAACRecording();
+    status_t setupRawAudioRecording();
+    status_t setupRTPRecording();
+    status_t setupMPEG2TSRecording();
     sp<MediaSource> createAudioSource();
     status_t checkVideoEncoderCapabilities(
             bool *supportsCameraSourceMetaDataMode);
@@ -147,14 +144,8 @@
     // depending on the videosource type
     status_t setupMediaSource(sp<MediaSource> *mediaSource);
     status_t setupCameraSource(sp<CameraSource> *cameraSource);
-    // setup the surfacemediasource for the encoder
-    status_t setupSurfaceMediaSource();
-
     status_t setupAudioEncoder(const sp<MediaWriter>& writer);
-    status_t setupVideoEncoder(
-            sp<MediaSource> cameraSource,
-            int32_t videoBitRate,
-            sp<MediaSource> *source);
+    status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaSource> *source);
 
     // Encoding parameter handling utilities
     status_t setParameter(const String8 &key, const String8 &value);
diff --git a/media/libmediaplayerservice/TestPlayerStub.cpp b/media/libmediaplayerservice/TestPlayerStub.cpp
index 5d9728a..5795773 100644
--- a/media/libmediaplayerservice/TestPlayerStub.cpp
+++ b/media/libmediaplayerservice/TestPlayerStub.cpp
@@ -113,7 +113,9 @@
 // Create the test player.
 // Call setDataSource on the test player with the url in param.
 status_t TestPlayerStub::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
     if (!isTestUrl(url) || NULL != mHandle) {
         return INVALID_OPERATION;
     }
@@ -162,7 +164,7 @@
     }
 
     mPlayer = (*mNewPlayer)();
-    return mPlayer->setDataSource(mContentUrl, headers);
+    return mPlayer->setDataSource(httpService, mContentUrl, headers);
 }
 
 // Internal cleanup.
diff --git a/media/libmediaplayerservice/TestPlayerStub.h b/media/libmediaplayerservice/TestPlayerStub.h
index a3802eb..55bf2c8 100644
--- a/media/libmediaplayerservice/TestPlayerStub.h
+++ b/media/libmediaplayerservice/TestPlayerStub.h
@@ -66,7 +66,9 @@
 
     // @param url Should be a test url. See class comment.
     virtual status_t setDataSource(
-            const char* url, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char* url,
+            const KeyedVector<String8, String8> *headers);
 
     // Test player for a file descriptor source is not supported.
     virtual status_t setDataSource(int, int64_t, int64_t)  {
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index f946c1c..f97ba57 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -11,7 +11,6 @@
         NuPlayerStreamListener.cpp      \
         RTSPSource.cpp                  \
         StreamingSource.cpp             \
-        mp4/MP4Source.cpp               \
 
 LOCAL_C_INCLUDES := \
 	$(TOP)/frameworks/av/media/libstagefright/httplive            \
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index b04e7a6..06aac33 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -33,17 +33,16 @@
 
 NuPlayer::GenericSource::GenericSource(
         const sp<AMessage> &notify,
+        const sp<IMediaHTTPService> &httpService,
         const char *url,
-        const KeyedVector<String8, String8> *headers,
-        bool uidValid,
-        uid_t uid)
+        const KeyedVector<String8, String8> *headers)
     : Source(notify),
       mDurationUs(0ll),
       mAudioIsVorbis(false) {
     DataSource::RegisterDefaultSniffers();
 
     sp<DataSource> dataSource =
-        DataSource::CreateFromURI(url, headers);
+        DataSource::CreateFromURI(httpService, url, headers);
     CHECK(dataSource != NULL);
 
     initFromDataSource(dataSource);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 2da680c..20d597e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -33,10 +33,9 @@
 struct NuPlayer::GenericSource : public NuPlayer::Source {
     GenericSource(
             const sp<AMessage> &notify,
+            const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers,
-            bool uidValid = false,
-            uid_t uid = 0);
+            const KeyedVector<String8, String8> *headers);
 
     GenericSource(
             const sp<AMessage> &notify,
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 510dcc9..cbedf5c 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -24,6 +24,7 @@
 #include "LiveDataSource.h"
 #include "LiveSession.h"
 
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -34,13 +35,12 @@
 
 NuPlayer::HTTPLiveSource::HTTPLiveSource(
         const sp<AMessage> &notify,
+        const sp<IMediaHTTPService> &httpService,
         const char *url,
-        const KeyedVector<String8, String8> *headers,
-        bool uidValid, uid_t uid)
+        const KeyedVector<String8, String8> *headers)
     : Source(notify),
+      mHTTPService(httpService),
       mURL(url),
-      mUIDValid(uidValid),
-      mUID(uid),
       mFlags(0),
       mFinalResult(OK),
       mOffset(0),
@@ -79,8 +79,7 @@
     mLiveSession = new LiveSession(
             notify,
             (mFlags & kFlagIncognito) ? LiveSession::kFlagIncognito : 0,
-            mUIDValid,
-            mUID);
+            mHTTPService);
 
     mLiveLooper->registerHandler(mLiveSession);
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index bcc3f8b..4d7251f 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -28,10 +28,9 @@
 struct NuPlayer::HTTPLiveSource : public NuPlayer::Source {
     HTTPLiveSource(
             const sp<AMessage> &notify,
+            const sp<IMediaHTTPService> &httpService,
             const char *url,
-            const KeyedVector<String8, String8> *headers,
-            bool uidValid = false,
-            uid_t uid = 0);
+            const KeyedVector<String8, String8> *headers);
 
     virtual void prepareAsync();
     virtual void start();
@@ -61,10 +60,9 @@
         kWhatFetchSubtitleData,
     };
 
+    sp<IMediaHTTPService> mHTTPService;
     AString mURL;
     KeyedVector<String8, String8> mExtraHeaders;
-    bool mUIDValid;
-    uid_t mUID;
     uint32_t mFlags;
     status_t mFinalResult;
     off64_t mOffset;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 25d55a3..d8d939a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -28,18 +28,13 @@
 #include "RTSPSource.h"
 #include "StreamingSource.h"
 #include "GenericSource.h"
-#include "mp4/MP4Source.h"
 
 #include "ATSParser.h"
 
-#include "SoftwareRenderer.h"
-
-#include <cutils/properties.h> // for property_get
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/ACodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -148,7 +143,6 @@
     : mUIDValid(false),
       mSourceFlags(0),
       mVideoIsAVC(false),
-      mNeedsSwRenderer(false),
       mAudioEOS(false),
       mVideoEOS(false),
       mScanSourcesPending(false),
@@ -183,14 +177,7 @@
 
     sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());
 
-    char prop[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.use-mp4source", prop, NULL)
-            && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) {
-        msg->setObject("source", new MP4Source(notify, source));
-    } else {
-        msg->setObject("source", new StreamingSource(notify, source));
-    }
-
+    msg->setObject("source", new StreamingSource(notify, source));
     msg->post();
 }
 
@@ -212,7 +199,9 @@
 }
 
 void NuPlayer::setDataSourceAsync(
-        const char *url, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
     size_t len = strlen(url);
 
@@ -220,16 +209,18 @@
 
     sp<Source> source;
     if (IsHTTPLiveURL(url)) {
-        source = new HTTPLiveSource(notify, url, headers, mUIDValid, mUID);
+        source = new HTTPLiveSource(notify, httpService, url, headers);
     } else if (!strncasecmp(url, "rtsp://", 7)) {
-        source = new RTSPSource(notify, url, headers, mUIDValid, mUID);
+        source = new RTSPSource(
+                notify, httpService, url, headers, mUIDValid, mUID);
     } else if ((!strncasecmp(url, "http://", 7)
                 || !strncasecmp(url, "https://", 8))
                     && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
                     || strstr(url, ".sdp?"))) {
-        source = new RTSPSource(notify, url, headers, mUIDValid, mUID, true);
+        source = new RTSPSource(
+                notify, httpService, url, headers, mUIDValid, mUID, true);
     } else {
-        source = new GenericSource(notify, url, headers, mUIDValid, mUID);
+        source = new GenericSource(notify, httpService, url, headers);
     }
 
     msg->setObject("source", source);
@@ -447,7 +438,6 @@
             ALOGV("kWhatStart");
 
             mVideoIsAVC = false;
-            mNeedsSwRenderer = false;
             mAudioEOS = false;
             mVideoEOS = false;
             mSkipRenderingAudioUntilMediaTimeUs = -1;
@@ -538,24 +528,21 @@
         {
             bool audio = msg->what() == kWhatAudioNotify;
 
-            sp<AMessage> codecRequest;
-            CHECK(msg->findMessage("codec-request", &codecRequest));
-
             int32_t what;
-            CHECK(codecRequest->findInt32("what", &what));
+            CHECK(msg->findInt32("what", &what));
 
-            if (what == ACodec::kWhatFillThisBuffer) {
+            if (what == Decoder::kWhatFillThisBuffer) {
                 status_t err = feedDecoderInputData(
-                        audio, codecRequest);
+                        audio, msg);
 
                 if (err == -EWOULDBLOCK) {
                     if (mSource->feedMoreTSData() == OK) {
                         msg->post(10000ll);
                     }
                 }
-            } else if (what == ACodec::kWhatEOS) {
+            } else if (what == Decoder::kWhatEOS) {
                 int32_t err;
-                CHECK(codecRequest->findInt32("err", &err));
+                CHECK(msg->findInt32("err", &err));
 
                 if (err == ERROR_END_OF_STREAM) {
                     ALOGV("got %s decoder EOS", audio ? "audio" : "video");
@@ -566,7 +553,7 @@
                 }
 
                 mRenderer->queueEOS(audio, err);
-            } else if (what == ACodec::kWhatFlushCompleted) {
+            } else if (what == Decoder::kWhatFlushCompleted) {
                 bool needShutdown;
 
                 if (audio) {
@@ -595,14 +582,17 @@
                 }
 
                 finishFlushIfPossible();
-            } else if (what == ACodec::kWhatOutputFormatChanged) {
+            } else if (what == Decoder::kWhatOutputFormatChanged) {
+                sp<AMessage> format;
+                CHECK(msg->findMessage("format", &format));
+
                 if (audio) {
                     int32_t numChannels;
-                    CHECK(codecRequest->findInt32(
+                    CHECK(format->findInt32(
                                 "channel-count", &numChannels));
 
                     int32_t sampleRate;
-                    CHECK(codecRequest->findInt32("sample-rate", &sampleRate));
+                    CHECK(format->findInt32("sample-rate", &sampleRate));
 
                     ALOGV("Audio output format changed to %d Hz, %d channels",
                          sampleRate, numChannels);
@@ -626,7 +616,7 @@
                     }
 
                     int32_t channelMask;
-                    if (!codecRequest->findInt32("channel-mask", &channelMask)) {
+                    if (!format->findInt32("channel-mask", &channelMask)) {
                         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
                     }
 
@@ -647,11 +637,11 @@
                     // video
 
                     int32_t width, height;
-                    CHECK(codecRequest->findInt32("width", &width));
-                    CHECK(codecRequest->findInt32("height", &height));
+                    CHECK(format->findInt32("width", &width));
+                    CHECK(format->findInt32("height", &height));
 
                     int32_t cropLeft, cropTop, cropRight, cropBottom;
-                    CHECK(codecRequest->findRect(
+                    CHECK(format->findRect(
                                 "crop",
                                 &cropLeft, &cropTop, &cropRight, &cropBottom));
 
@@ -684,22 +674,8 @@
 
                     notifyListener(
                             MEDIA_SET_VIDEO_SIZE, displayWidth, displayHeight);
-
-                    if (mNeedsSwRenderer && mNativeWindow != NULL) {
-                        int32_t colorFormat;
-                        CHECK(codecRequest->findInt32("color-format", &colorFormat));
-
-                        sp<MetaData> meta = new MetaData;
-                        meta->setInt32(kKeyWidth, width);
-                        meta->setInt32(kKeyHeight, height);
-                        meta->setRect(kKeyCropRect, cropLeft, cropTop, cropRight, cropBottom);
-                        meta->setInt32(kKeyColorFormat, colorFormat);
-
-                        mRenderer->setSoftRenderer(
-                                new SoftwareRenderer(mNativeWindow->getNativeWindow(), meta));
-                    }
                 }
-            } else if (what == ACodec::kWhatShutdownCompleted) {
+            } else if (what == Decoder::kWhatShutdownCompleted) {
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
                 if (audio) {
                     mAudioDecoder.clear();
@@ -714,22 +690,15 @@
                 }
 
                 finishFlushIfPossible();
-            } else if (what == ACodec::kWhatError) {
+            } else if (what == Decoder::kWhatError) {
                 ALOGE("Received error from %s decoder, aborting playback.",
                      audio ? "audio" : "video");
 
                 mRenderer->queueEOS(audio, UNKNOWN_ERROR);
-            } else if (what == ACodec::kWhatDrainThisBuffer) {
-                renderBuffer(audio, codecRequest);
-            } else if (what == ACodec::kWhatComponentAllocated) {
-                if (!audio) {
-                    AString name;
-                    CHECK(codecRequest->findString("componentName", &name));
-                    mNeedsSwRenderer = name.startsWith("OMX.google.");
-                }
-            } else if (what != ACodec::kWhatComponentConfigured
-                    && what != ACodec::kWhatBuffersAllocated) {
-                ALOGV("Unhandled codec notification %d '%c%c%c%c'.",
+            } else if (what == Decoder::kWhatDrainThisBuffer) {
+                renderBuffer(audio, msg);
+            } else {
+                ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
                       what,
                       what >> 24,
                       (what >> 16) & 0xff,
@@ -930,8 +899,7 @@
 
     *decoder = audio ? new Decoder(notify) :
                        new Decoder(notify, mNativeWindow);
-    looper()->registerHandler(*decoder);
-
+    (*decoder)->init();
     (*decoder)->configure(format);
 
     return OK;
@@ -1531,7 +1499,7 @@
     notify->post();
 }
 
-void NuPlayer::Source::onMessageReceived(const sp<AMessage> &msg) {
+void NuPlayer::Source::onMessageReceived(const sp<AMessage> & /* msg */) {
     TRESPASS();
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 590e1f2..f1d3d55 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -24,7 +24,6 @@
 
 namespace android {
 
-struct ACodec;
 struct MetaData;
 struct NuPlayerDriver;
 
@@ -38,7 +37,9 @@
     void setDataSourceAsync(const sp<IStreamSource> &source);
 
     void setDataSourceAsync(
-            const char *url, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char *url,
+            const KeyedVector<String8, String8> *headers);
 
     void setDataSourceAsync(int fd, int64_t offset, int64_t length);
 
@@ -116,7 +117,6 @@
     sp<MediaPlayerBase::AudioSink> mAudioSink;
     sp<Decoder> mVideoDecoder;
     bool mVideoIsAVC;
-    bool mNeedsSwRenderer;
     sp<Decoder> mAudioDecoder;
     sp<Renderer> mRenderer;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 2423fd5..469c9ca 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -17,14 +17,17 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NuPlayerDecoder"
 #include <utils/Log.h>
+#include <inttypes.h>
 
 #include "NuPlayerDecoder.h"
 
+#include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/ACodec.h>
+#include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
 
 namespace android {
 
@@ -32,70 +35,404 @@
         const sp<AMessage> &notify,
         const sp<NativeWindowWrapper> &nativeWindow)
     : mNotify(notify),
-      mNativeWindow(nativeWindow) {
+      mNativeWindow(nativeWindow),
+      mBufferGeneration(0),
+      mComponentName("decoder") {
+    // Every decoder has its own looper because MediaCodec operations
+    // are blocking, but NuPlayer needs asynchronous operations.
+    mDecoderLooper = new ALooper;
+    mDecoderLooper->setName("NuPlayerDecoder");
+    mDecoderLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+
+    mCodecLooper = new ALooper;
+    mCodecLooper->setName("NuPlayerDecoder-MC");
+    mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
 }
 
 NuPlayer::Decoder::~Decoder() {
 }
 
-void NuPlayer::Decoder::configure(const sp<AMessage> &format) {
+void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
     CHECK(mCodec == NULL);
 
+    ++mBufferGeneration;
+
     AString mime;
     CHECK(format->findString("mime", &mime));
 
-    sp<AMessage> notifyMsg =
-        new AMessage(kWhatCodecNotify, id());
-
-    mCSDIndex = 0;
-    for (size_t i = 0;; ++i) {
-        sp<ABuffer> csd;
-        if (!format->findBuffer(StringPrintf("csd-%d", i).c_str(), &csd)) {
-            break;
-        }
-
-        mCSD.push(csd);
+    sp<Surface> surface = NULL;
+    if (mNativeWindow != NULL) {
+        surface = mNativeWindow->getSurfaceTextureClient();
     }
 
+    mComponentName = mime;
+    mComponentName.append(" decoder");
+    ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), surface.get());
+
+    mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+    if (mCodec == NULL) {
+        ALOGE("Failed to create %s decoder", mime.c_str());
+        handleError(UNKNOWN_ERROR);
+        return;
+    }
+
+    mCodec->getName(&mComponentName);
+
     if (mNativeWindow != NULL) {
-        format->setObject("native-window", mNativeWindow);
+        // disconnect from surface as MediaCodec will reconnect
+        CHECK_EQ((int)NO_ERROR,
+                native_window_api_disconnect(
+                        surface.get(),
+                        NATIVE_WINDOW_API_MEDIA));
+    }
+    status_t err = mCodec->configure(
+            format, surface, NULL /* crypto */, 0 /* flags */);
+    if (err != OK) {
+        ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err);
+        handleError(err);
+        return;
+    }
+    // the following should work in configured state
+    CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
+    CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
+
+    err = mCodec->start();
+    if (err != OK) {
+        ALOGE("Failed to start %s decoder (err=%d)", mComponentName.c_str(), err);
+        handleError(err);
+        return;
     }
 
-    // Current video decoders do not return from OMX_FillThisBuffer
-    // quickly, violating the OpenMAX specs, until that is remedied
-    // we need to invest in an extra looper to free the main event
-    // queue.
-    bool needDedicatedLooper = !strncasecmp(mime.c_str(), "video/", 6);
+    // the following should work after start
+    CHECK_EQ((status_t)OK, mCodec->getInputBuffers(&mInputBuffers));
+    CHECK_EQ((status_t)OK, mCodec->getOutputBuffers(&mOutputBuffers));
+    ALOGV("[%s] got %zu input and %zu output buffers",
+            mComponentName.c_str(),
+            mInputBuffers.size(),
+            mOutputBuffers.size());
 
-    mFormat = format;
-    mCodec = new ACodec;
+    requestCodecNotification();
+}
 
-    if (needDedicatedLooper && mCodecLooper == NULL) {
-        mCodecLooper = new ALooper;
-        mCodecLooper->setName("NuPlayerDecoder");
-        mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+void NuPlayer::Decoder::requestCodecNotification() {
+    if (mCodec != NULL) {
+        sp<AMessage> reply = new AMessage(kWhatCodecNotify, id());
+        reply->setInt32("generation", mBufferGeneration);
+        mCodec->requestActivityNotification(reply);
+    }
+}
+
+bool NuPlayer::Decoder::isStaleReply(const sp<AMessage> &msg) {
+    int32_t generation;
+    CHECK(msg->findInt32("generation", &generation));
+    return generation != mBufferGeneration;
+}
+
+void NuPlayer::Decoder::init() {
+    mDecoderLooper->registerHandler(this);
+}
+
+void NuPlayer::Decoder::configure(const sp<AMessage> &format) {
+    sp<AMessage> msg = new AMessage(kWhatConfigure, id());
+    msg->setMessage("format", format);
+    msg->post();
+}
+
+void NuPlayer::Decoder::handleError(int32_t err)
+{
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+bool NuPlayer::Decoder::handleAnInputBuffer() {
+    size_t bufferIx = -1;
+    status_t res = mCodec->dequeueInputBuffer(&bufferIx);
+    ALOGV("[%s] dequeued input: %d",
+            mComponentName.c_str(), res == OK ? (int)bufferIx : res);
+    if (res != OK) {
+        if (res != -EAGAIN) {
+            handleError(res);
+        }
+        return false;
     }
 
-    (needDedicatedLooper ? mCodecLooper : looper())->registerHandler(mCodec);
+    CHECK_LT(bufferIx, mInputBuffers.size());
 
-    mCodec->setNotificationMessage(notifyMsg);
-    mCodec->initiateSetup(format);
+    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, id());
+    reply->setSize("buffer-ix", bufferIx);
+    reply->setInt32("generation", mBufferGeneration);
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatFillThisBuffer);
+    notify->setBuffer("buffer", mInputBuffers[bufferIx]);
+    notify->setMessage("reply", reply);
+    notify->post();
+    return true;
+}
+
+void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) {
+    size_t bufferIx;
+    CHECK(msg->findSize("buffer-ix", &bufferIx));
+    CHECK_LT(bufferIx, mInputBuffers.size());
+    sp<ABuffer> codecBuffer = mInputBuffers[bufferIx];
+
+    sp<ABuffer> buffer;
+    bool hasBuffer = msg->findBuffer("buffer", &buffer);
+    if (buffer == NULL /* includes !hasBuffer */) {
+        int32_t streamErr = ERROR_END_OF_STREAM;
+        CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
+
+        if (streamErr == OK) {
+            /* buffers are returned to hold on to */
+            return;
+        }
+
+        // attempt to queue EOS
+        status_t err = mCodec->queueInputBuffer(
+                bufferIx,
+                0,
+                0,
+                0,
+                MediaCodec::BUFFER_FLAG_EOS);
+        if (streamErr == ERROR_END_OF_STREAM && err != OK) {
+            streamErr = err;
+            // err will not be ERROR_END_OF_STREAM
+        }
+
+        if (streamErr != ERROR_END_OF_STREAM) {
+            handleError(streamErr);
+        }
+    } else {
+        int64_t timeUs = 0;
+        uint32_t flags = 0;
+        CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+        int32_t eos;
+        // we do not expect CODECCONFIG or SYNCFRAME for decoder
+        if (buffer->meta()->findInt32("eos", &eos) && eos) {
+            flags |= MediaCodec::BUFFER_FLAG_EOS;
+        }
+
+        // copy into codec buffer
+        if (buffer != codecBuffer) {
+            CHECK_LE(buffer->size(), codecBuffer->capacity());
+            codecBuffer->setRange(0, buffer->size());
+            memcpy(codecBuffer->data(), buffer->data(), buffer->size());
+        }
+
+        status_t err = mCodec->queueInputBuffer(
+                        bufferIx,
+                        codecBuffer->offset(),
+                        codecBuffer->size(),
+                        timeUs,
+                        flags);
+        if (err != OK) {
+            ALOGE("Failed to queue input buffer for %s (err=%d)",
+                    mComponentName.c_str(), err);
+            handleError(err);
+        }
+    }
+}
+
+bool NuPlayer::Decoder::handleAnOutputBuffer() {
+    size_t bufferIx = -1;
+    size_t offset;
+    size_t size;
+    int64_t timeUs;
+    uint32_t flags;
+    status_t res = mCodec->dequeueOutputBuffer(
+            &bufferIx, &offset, &size, &timeUs, &flags);
+
+    if (res != OK) {
+        ALOGV("[%s] dequeued output: %d", mComponentName.c_str(), res);
+    } else {
+        ALOGV("[%s] dequeued output: %d (time=%lld flags=%" PRIu32 ")",
+                mComponentName.c_str(), (int)bufferIx, timeUs, flags);
+    }
+
+    if (res == INFO_OUTPUT_BUFFERS_CHANGED) {
+        res = mCodec->getOutputBuffers(&mOutputBuffers);
+        if (res != OK) {
+            ALOGE("Failed to get output buffers for %s after INFO event (err=%d)",
+                    mComponentName.c_str(), res);
+            handleError(res);
+            return false;
+        }
+        // NuPlayer ignores this
+        return true;
+    } else if (res == INFO_FORMAT_CHANGED) {
+        sp<AMessage> format = new AMessage();
+        res = mCodec->getOutputFormat(&format);
+        if (res != OK) {
+            ALOGE("Failed to get output format for %s after INFO event (err=%d)",
+                    mComponentName.c_str(), res);
+            handleError(res);
+            return false;
+        }
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatOutputFormatChanged);
+        notify->setMessage("format", format);
+        notify->post();
+        return true;
+    } else if (res == INFO_DISCONTINUITY) {
+        // nothing to do
+        return true;
+    } else if (res != OK) {
+        if (res != -EAGAIN) {
+            handleError(res);
+        }
+        return false;
+    }
+
+    CHECK_LT(bufferIx, mOutputBuffers.size());
+    sp<ABuffer> buffer = mOutputBuffers[bufferIx];
+    buffer->setRange(offset, size);
+    buffer->meta()->clear();
+    buffer->meta()->setInt64("timeUs", timeUs);
+    if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+        buffer->meta()->setInt32("eos", true);
+    }
+    // we do not expect CODECCONFIG or SYNCFRAME for decoder
+
+    sp<AMessage> reply = new AMessage(kWhatRenderBuffer, id());
+    reply->setSize("buffer-ix", bufferIx);
+    reply->setInt32("generation", mBufferGeneration);
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatDrainThisBuffer);
+    notify->setBuffer("buffer", buffer);
+    notify->setMessage("reply", reply);
+    notify->post();
+
+    // FIXME: This should be handled after rendering is complete,
+    // but Renderer needs it now
+    if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+        ALOGV("queueing eos [%s]", mComponentName.c_str());
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatEOS);
+        notify->setInt32("err", ERROR_END_OF_STREAM);
+        notify->post();
+    }
+    return true;
+}
+
+void NuPlayer::Decoder::onRenderBuffer(const sp<AMessage> &msg) {
+    status_t err;
+    int32_t render;
+    size_t bufferIx;
+    CHECK(msg->findSize("buffer-ix", &bufferIx));
+    if (msg->findInt32("render", &render) && render) {
+        err = mCodec->renderOutputBufferAndRelease(bufferIx);
+    } else {
+        err = mCodec->releaseOutputBuffer(bufferIx);
+    }
+    if (err != OK) {
+        ALOGE("failed to release output buffer for %s (err=%d)",
+                mComponentName.c_str(), err);
+        handleError(err);
+    }
+}
+
+void NuPlayer::Decoder::onFlush() {
+    status_t err = OK;
+    if (mCodec != NULL) {
+        err = mCodec->flush();
+        ++mBufferGeneration;
+    }
+
+    if (err != OK) {
+        ALOGE("failed to flush %s (err=%d)", mComponentName.c_str(), err);
+        handleError(err);
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatFlushCompleted);
+    notify->post();
+}
+
+void NuPlayer::Decoder::onShutdown() {
+    status_t err = OK;
+    if (mCodec != NULL) {
+        err = mCodec->release();
+        mCodec = NULL;
+        ++mBufferGeneration;
+
+        if (mNativeWindow != NULL) {
+            // reconnect to surface as MediaCodec disconnected from it
+            CHECK_EQ((int)NO_ERROR,
+                    native_window_api_connect(
+                            mNativeWindow->getNativeWindow().get(),
+                            NATIVE_WINDOW_API_MEDIA));
+        }
+        mComponentName = "decoder";
+    }
+
+    if (err != OK) {
+        ALOGE("failed to release %s (err=%d)", mComponentName.c_str(), err);
+        handleError(err);
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatShutdownCompleted);
+    notify->post();
 }
 
 void NuPlayer::Decoder::onMessageReceived(const sp<AMessage> &msg) {
+    ALOGV("[%s] onMessage: %s", mComponentName.c_str(), msg->debugString().c_str());
+
     switch (msg->what()) {
+        case kWhatConfigure:
+        {
+            sp<AMessage> format;
+            CHECK(msg->findMessage("format", &format));
+            onConfigure(format);
+            break;
+        }
+
         case kWhatCodecNotify:
         {
-            int32_t what;
-            CHECK(msg->findInt32("what", &what));
+            if (!isStaleReply(msg)) {
+                while (handleAnInputBuffer()) {
+                }
 
-            if (what == ACodec::kWhatFillThisBuffer) {
-                onFillThisBuffer(msg);
-            } else {
-                sp<AMessage> notify = mNotify->dup();
-                notify->setMessage("codec-request", msg);
-                notify->post();
+                while (handleAnOutputBuffer()) {
+                }
             }
+
+            requestCodecNotification();
+            break;
+        }
+
+        case kWhatInputBufferFilled:
+        {
+            if (!isStaleReply(msg)) {
+                onInputBufferFilled(msg);
+            }
+            break;
+        }
+
+        case kWhatRenderBuffer:
+        {
+            if (!isStaleReply(msg)) {
+                onRenderBuffer(msg);
+            }
+            break;
+        }
+
+        case kWhatFlush:
+        {
+            onFlush();
+            break;
+        }
+
+        case kWhatShutdown:
+        {
+            onShutdown();
             break;
         }
 
@@ -105,47 +442,16 @@
     }
 }
 
-void NuPlayer::Decoder::onFillThisBuffer(const sp<AMessage> &msg) {
-    sp<AMessage> reply;
-    CHECK(msg->findMessage("reply", &reply));
-
-#if 0
-    sp<ABuffer> outBuffer;
-    CHECK(msg->findBuffer("buffer", &outBuffer));
-#else
-    sp<ABuffer> outBuffer;
-#endif
-
-    if (mCSDIndex < mCSD.size()) {
-        outBuffer = mCSD.editItemAt(mCSDIndex++);
-        outBuffer->meta()->setInt64("timeUs", 0);
-
-        reply->setBuffer("buffer", outBuffer);
-        reply->post();
-        return;
-    }
-
-    sp<AMessage> notify = mNotify->dup();
-    notify->setMessage("codec-request", msg);
-    notify->post();
-}
-
 void NuPlayer::Decoder::signalFlush() {
-    if (mCodec != NULL) {
-        mCodec->signalFlush();
-    }
+    (new AMessage(kWhatFlush, id()))->post();
 }
 
 void NuPlayer::Decoder::signalResume() {
-    if (mCodec != NULL) {
-        mCodec->signalResume();
-    }
+    // nothing to do
 }
 
 void NuPlayer::Decoder::initiateShutdown() {
-    if (mCodec != NULL) {
-        mCodec->initiateShutdown();
-    }
+    (new AMessage(kWhatShutdown, id()))->post();
 }
 
 bool NuPlayer::Decoder::supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const {
@@ -163,14 +469,16 @@
         const char * keys[] = { "channel-count", "sample-rate", "is-adts" };
         for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
             int32_t oldVal, newVal;
-            if (!mFormat->findInt32(keys[i], &oldVal) || !targetFormat->findInt32(keys[i], &newVal)
-                    || oldVal != newVal) {
+            if (!mOutputFormat->findInt32(keys[i], &oldVal) ||
+                    !targetFormat->findInt32(keys[i], &newVal) ||
+                    oldVal != newVal) {
                 return false;
             }
         }
 
         sp<ABuffer> oldBuf, newBuf;
-        if (mFormat->findBuffer("csd-0", &oldBuf) && targetFormat->findBuffer("csd-0", &newBuf)) {
+        if (mOutputFormat->findBuffer("csd-0", &oldBuf) &&
+                targetFormat->findBuffer("csd-0", &newBuf)) {
             if (oldBuf->size() != newBuf->size()) {
                 return false;
             }
@@ -181,7 +489,7 @@
 }
 
 bool NuPlayer::Decoder::supportsSeamlessFormatChange(const sp<AMessage> &targetFormat) const {
-    if (mFormat == NULL) {
+    if (mOutputFormat == NULL) {
         return false;
     }
 
@@ -190,7 +498,7 @@
     }
 
     AString oldMime, newMime;
-    if (!mFormat->findString("mime", &oldMime)
+    if (!mOutputFormat->findString("mime", &oldMime)
             || !targetFormat->findString("mime", &newMime)
             || !(oldMime == newMime)) {
         return false;
@@ -201,7 +509,10 @@
     if (audio) {
         seamless = supportsSeamlessAudioFormatChange(targetFormat);
     } else {
-        seamless = mCodec != NULL && mCodec->isConfiguredForAdaptivePlayback();
+        int32_t isAdaptive;
+        seamless = (mCodec != NULL &&
+                mInputFormat->findInt32("adaptive-playback", &isAdaptive) &&
+                isAdaptive);
     }
 
     ALOGV("%s seamless support for %s", seamless ? "yes" : "no", oldMime.c_str());
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 78ea74a..94243fc 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -25,12 +25,14 @@
 namespace android {
 
 struct ABuffer;
+struct MediaCodec;
 
 struct NuPlayer::Decoder : public AHandler {
     Decoder(const sp<AMessage> &notify,
             const sp<NativeWindowWrapper> &nativeWindow = NULL);
 
     void configure(const sp<AMessage> &format);
+    void init();
 
     void signalFlush();
     void signalResume();
@@ -38,7 +40,18 @@
 
     bool supportsSeamlessFormatChange(const sp<AMessage> &to) const;
 
+    enum {
+        kWhatFillThisBuffer      = 'flTB',
+        kWhatDrainThisBuffer     = 'drTB',
+        kWhatOutputFormatChanged = 'fmtC',
+        kWhatFlushCompleted      = 'flsC',
+        kWhatShutdownCompleted   = 'shDC',
+        kWhatEOS                 = 'eos ',
+        kWhatError               = 'err ',
+    };
+
 protected:
+
     virtual ~Decoder();
 
     virtual void onMessageReceived(const sp<AMessage> &msg);
@@ -46,21 +59,40 @@
 private:
     enum {
         kWhatCodecNotify        = 'cdcN',
+        kWhatConfigure          = 'conf',
+        kWhatInputBufferFilled  = 'inpF',
+        kWhatRenderBuffer       = 'rndr',
+        kWhatFlush              = 'flus',
+        kWhatShutdown           = 'shuD',
     };
 
     sp<AMessage> mNotify;
     sp<NativeWindowWrapper> mNativeWindow;
 
-    sp<AMessage> mFormat;
-    sp<ACodec> mCodec;
+    sp<AMessage> mInputFormat;
+    sp<AMessage> mOutputFormat;
+    sp<MediaCodec> mCodec;
     sp<ALooper> mCodecLooper;
+    sp<ALooper> mDecoderLooper;
 
-    Vector<sp<ABuffer> > mCSD;
-    size_t mCSDIndex;
+    Vector<sp<ABuffer> > mInputBuffers;
+    Vector<sp<ABuffer> > mOutputBuffers;
 
-    sp<AMessage> makeFormat(const sp<MetaData> &meta);
+    void handleError(int32_t err);
+    bool handleAnInputBuffer();
+    bool handleAnOutputBuffer();
 
-    void onFillThisBuffer(const sp<AMessage> &msg);
+    void requestCodecNotification();
+    bool isStaleReply(const sp<AMessage> &msg);
+
+    void onConfigure(const sp<AMessage> &format);
+    void onFlush();
+    void onInputBufferFilled(const sp<AMessage> &msg);
+    void onRenderBuffer(const sp<AMessage> &msg);
+    void onShutdown();
+
+    int32_t mBufferGeneration;
+    AString mComponentName;
 
     bool supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index b9651a1..e4850f0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -71,7 +71,9 @@
 }
 
 status_t NuPlayerDriver::setDataSource(
-        const char *url, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
     Mutex::Autolock autoLock(mLock);
 
     if (mState != STATE_IDLE) {
@@ -80,7 +82,7 @@
 
     mState = STATE_SET_DATASOURCE_PENDING;
 
-    mPlayer->setDataSourceAsync(url, headers);
+    mPlayer->setDataSourceAsync(httpService, url, headers);
 
     while (mState == STATE_SET_DATASOURCE_PENDING) {
         mCondition.wait(mLock);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 99f72a6..0148fb1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -31,7 +31,9 @@
     virtual status_t setUID(uid_t uid);
 
     virtual status_t setDataSource(
-            const char *url, const KeyedVector<String8, String8> *headers);
+            const sp<IMediaHTTPService> &httpService,
+            const char *url,
+            const KeyedVector<String8, String8> *headers);
 
     virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index bf5271e..a070c1a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -20,8 +20,6 @@
 
 #include "NuPlayerRenderer.h"
 
-#include "SoftwareRenderer.h"
-
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -36,7 +34,6 @@
         const sp<AMessage> &notify,
         uint32_t flags)
     : mAudioSink(sink),
-      mSoftRenderer(NULL),
       mNotify(notify),
       mFlags(flags),
       mNumFramesWritten(0),
@@ -60,12 +57,6 @@
 }
 
 NuPlayer::Renderer::~Renderer() {
-    delete mSoftRenderer;
-}
-
-void NuPlayer::Renderer::setSoftRenderer(SoftwareRenderer *softRenderer) {
-    delete mSoftRenderer;
-    mSoftRenderer = softRenderer;
 }
 
 void NuPlayer::Renderer::queueBuffer(
@@ -425,9 +416,6 @@
         ALOGV("rendering video at media time %.2f secs",
                 (mFlags & FLAG_REAL_TIME ? realTimeUs :
                 (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
-        if (mSoftRenderer != NULL) {
-            mSoftRenderer->render(entry->mBuffer->data(), entry->mBuffer->size(), NULL);
-        }
     }
 
     entry->mNotifyConsumed->setInt32("render", !tooLate);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 9124e03..94a05ea 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -23,7 +23,6 @@
 namespace android {
 
 struct ABuffer;
-class SoftwareRenderer;
 
 struct NuPlayer::Renderer : public AHandler {
     enum Flags {
@@ -57,8 +56,6 @@
         kWhatMediaRenderingStart = 'mdrd',
     };
 
-    void setSoftRenderer(SoftwareRenderer *softRenderer);
-
 protected:
     virtual ~Renderer();
 
@@ -86,7 +83,6 @@
     static const int64_t kMinPositionUpdateDelayUs;
 
     sp<MediaPlayerBase::AudioSink> mAudioSink;
-    SoftwareRenderer *mSoftRenderer;
     sp<AMessage> mNotify;
     uint32_t mFlags;
     List<QueueEntry> mAudioQueue;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 18cf6d1..94800ba 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -24,6 +24,7 @@
 #include "MyHandler.h"
 #include "SDPLoader.h"
 
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 
@@ -33,12 +34,14 @@
 
 NuPlayer::RTSPSource::RTSPSource(
         const sp<AMessage> &notify,
+        const sp<IMediaHTTPService> &httpService,
         const char *url,
         const KeyedVector<String8, String8> *headers,
         bool uidValid,
         uid_t uid,
         bool isSDP)
     : Source(notify),
+      mHTTPService(httpService),
       mURL(url),
       mUIDValid(uidValid),
       mUID(uid),
@@ -92,7 +95,7 @@
     if (mIsSDP) {
         mSDPLoader = new SDPLoader(notify,
                 (mFlags & kFlagIncognito) ? SDPLoader::kFlagIncognito : 0,
-                mUIDValid, mUID);
+                mHTTPService);
 
         mSDPLoader->load(
                 mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 8cf34a0..3718bf9 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -34,6 +34,7 @@
 struct NuPlayer::RTSPSource : public NuPlayer::Source {
     RTSPSource(
             const sp<AMessage> &notify,
+            const sp<IMediaHTTPService> &httpService,
             const char *url,
             const KeyedVector<String8, String8> *headers,
             bool uidValid = false,
@@ -88,6 +89,7 @@
         bool mNPTMappingValid;
     };
 
+    sp<IMediaHTTPService> mHTTPService;
     AString mURL;
     KeyedVector<String8, String8> mExtraHeaders;
     bool mUIDValid;
diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
deleted file mode 100644
index 2aae4dd..0000000
--- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MP4Source.h"
-
-#include "FragmentedMP4Parser.h"
-#include "../NuPlayerStreamListener.h"
-
-#include <media/IStreamSource.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-struct StreamSource : public FragmentedMP4Parser::Source {
-    StreamSource(const sp<IStreamSource> &source)
-        : mListener(new NuPlayer::NuPlayerStreamListener(source, 0)),
-          mPosition(0) {
-        mListener->start();
-    }
-
-    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
-        if (offset < mPosition) {
-            return -EPIPE;
-        }
-
-        while (offset > mPosition) {
-            char buffer[1024];
-            off64_t skipBytes = offset - mPosition;
-            if (skipBytes > sizeof(buffer)) {
-                skipBytes = sizeof(buffer);
-            }
-
-            sp<AMessage> extra;
-            ssize_t n;
-            for (;;) {
-                n = mListener->read(buffer, skipBytes, &extra);
-
-                if (n == -EWOULDBLOCK) {
-                    usleep(10000);
-                    continue;
-                }
-
-                break;
-            }
-
-            ALOGV("skipped %ld bytes at offset %lld", n, mPosition);
-
-            if (n < 0) {
-                return n;
-            }
-
-            mPosition += n;
-        }
-
-        sp<AMessage> extra;
-        size_t total = 0;
-        while (total < size) {
-            ssize_t n = mListener->read(
-                    (uint8_t *)data + total, size - total, &extra);
-
-            if (n == -EWOULDBLOCK) {
-                usleep(10000);
-                continue;
-            } else if (n == 0) {
-                break;
-            } else if (n < 0) {
-                mPosition += total;
-                return n;
-            }
-
-            total += n;
-        }
-
-        ALOGV("read %ld bytes at offset %lld", total, mPosition);
-
-        mPosition += total;
-
-        return total;
-    }
-
-    bool isSeekable() {
-        return false;
-    }
-
-private:
-    sp<NuPlayer::NuPlayerStreamListener> mListener;
-    off64_t mPosition;
-
-    DISALLOW_EVIL_CONSTRUCTORS(StreamSource);
-};
-
-MP4Source::MP4Source(
-        const sp<AMessage> &notify, const sp<IStreamSource> &source)
-    : Source(notify),
-      mSource(source),
-      mLooper(new ALooper),
-      mParser(new FragmentedMP4Parser),
-      mEOS(false) {
-    mLooper->registerHandler(mParser);
-}
-
-MP4Source::~MP4Source() {
-}
-
-void MP4Source::prepareAsync() {
-    notifyVideoSizeChanged(0, 0);
-    notifyFlagsChanged(0);
-    notifyPrepared();
-}
-
-void MP4Source::start() {
-    mLooper->start(false /* runOnCallingThread */);
-    mParser->start(new StreamSource(mSource));
-}
-
-status_t MP4Source::feedMoreTSData() {
-    return mEOS ? ERROR_END_OF_STREAM : (status_t)OK;
-}
-
-sp<AMessage> MP4Source::getFormat(bool audio) {
-    return mParser->getFormat(audio);
-}
-
-status_t MP4Source::dequeueAccessUnit(
-        bool audio, sp<ABuffer> *accessUnit) {
-    return mParser->dequeueAccessUnit(audio, accessUnit);
-}
-
-}  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h
deleted file mode 100644
index a6ef622..0000000
--- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MP4_SOURCE_H
-#define MP4_SOURCE_H
-
-#include "NuPlayerSource.h"
-
-namespace android {
-
-struct FragmentedMP4Parser;
-
-struct MP4Source : public NuPlayer::Source {
-    MP4Source(const sp<AMessage> &notify, const sp<IStreamSource> &source);
-
-    virtual void prepareAsync();
-    virtual void start();
-
-    virtual status_t feedMoreTSData();
-
-    virtual sp<AMessage> getFormat(bool audio);
-
-    virtual status_t dequeueAccessUnit(
-            bool audio, sp<ABuffer> *accessUnit);
-
-protected:
-    virtual ~MP4Source();
-
-private:
-    sp<IStreamSource> mSource;
-    sp<ALooper> mLooper;
-    sp<FragmentedMP4Parser> mParser;
-    bool mEOS;
-
-    DISALLOW_EVIL_CONSTRUCTORS(MP4Source);
-};
-
-}  // namespace android
-
-#endif // MP4_SOURCE_H
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 69c75b8..9707c4a 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -31,9 +31,8 @@
     libcommon_time_client \
     libcutils \
     libutils \
-    liblog \
-    libmedia
-# This dependency on libmedia is for SingleStateQueueInstantiations.
-# Consider a separate a library for SingleStateQueueInstantiations.
+    liblog
+
+LOCAL_STATIC_LIBRARIES += libinstantssq
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/AudioBufferProviderSource.cpp b/media/libnbaio/AudioBufferProviderSource.cpp
index 74a6fdb..551f516 100644
--- a/media/libnbaio/AudioBufferProviderSource.cpp
+++ b/media/libnbaio/AudioBufferProviderSource.cpp
@@ -24,11 +24,11 @@
 namespace android {
 
 AudioBufferProviderSource::AudioBufferProviderSource(AudioBufferProvider *provider,
-                                                     NBAIO_Format format) :
+                                                     const NBAIO_Format& format) :
     NBAIO_Source(format), mProvider(provider), mConsumed(0)
 {
     ALOG_ASSERT(provider != NULL);
-    ALOG_ASSERT(format != Format_Invalid);
+    ALOG_ASSERT(Format_isValid(format));
 }
 
 AudioBufferProviderSource::~AudioBufferProviderSource()
@@ -68,7 +68,7 @@
     }
     // count could be zero, either because count was zero on entry or
     // available is zero, but both are unlikely so don't check for that
-    memcpy(buffer, (char *) mBuffer.raw + (mConsumed << mBitShift), count << mBitShift);
+    memcpy(buffer, (char *) mBuffer.raw + (mConsumed * mFrameSize), count * mFrameSize);
     if (CC_UNLIKELY((mConsumed += count) >= mBuffer.frameCount)) {
         mProvider->releaseBuffer(&mBuffer);
         mBuffer.raw = NULL;
@@ -120,7 +120,7 @@
             count = available;
         }
         if (CC_LIKELY(count > 0)) {
-            char* readTgt = (char *) mBuffer.raw + (mConsumed << mBitShift);
+            char* readTgt = (char *) mBuffer.raw + (mConsumed * mFrameSize);
             ssize_t ret = via(user, readTgt, count, readPTS);
             if (CC_UNLIKELY(ret <= 0)) {
                 if (CC_LIKELY(accumulator > 0)) {
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 05273f6..80bf61a 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -40,16 +40,14 @@
 ssize_t AudioStreamInSource::negotiate(const NBAIO_Format offers[], size_t numOffers,
                                       NBAIO_Format counterOffers[], size_t& numCounterOffers)
 {
-    if (mFormat == Format_Invalid) {
+    if (!Format_isValid(mFormat)) {
         mStreamBufferSizeBytes = mStream->common.get_buffer_size(&mStream->common);
         audio_format_t streamFormat = mStream->common.get_format(&mStream->common);
-        if (streamFormat == AUDIO_FORMAT_PCM_16_BIT) {
-            uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
-            audio_channel_mask_t channelMask =
-                    (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
-            mFormat = Format_from_SR_C(sampleRate, popcount(channelMask));
-            mBitShift = Format_frameBitShift(mFormat);
-        }
+        uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
+        audio_channel_mask_t channelMask =
+                (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
+        mFormat = Format_from_SR_C(sampleRate, popcount(channelMask), streamFormat);
+        mFrameSize = Format_frameSize(mFormat);
     }
     return NBAIO_Source::negotiate(offers, numOffers, counterOffers, numCounterOffers);
 }
@@ -67,12 +65,12 @@
 
 ssize_t AudioStreamInSource::read(void *buffer, size_t count)
 {
-    if (CC_UNLIKELY(mFormat == Format_Invalid)) {
+    if (CC_UNLIKELY(!Format_isValid(mFormat))) {
         return NEGOTIATE;
     }
-    ssize_t bytesRead = mStream->read(mStream, buffer, count << mBitShift);
+    ssize_t bytesRead = mStream->read(mStream, buffer, count * mFrameSize);
     if (bytesRead > 0) {
-        size_t framesRead = bytesRead >> mBitShift;
+        size_t framesRead = bytesRead / mFrameSize;
         mFramesRead += framesRead;
         return framesRead;
     } else {
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index e4341d7..c28d34d 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -37,16 +37,14 @@
 ssize_t AudioStreamOutSink::negotiate(const NBAIO_Format offers[], size_t numOffers,
                                       NBAIO_Format counterOffers[], size_t& numCounterOffers)
 {
-    if (mFormat == Format_Invalid) {
+    if (!Format_isValid(mFormat)) {
         mStreamBufferSizeBytes = mStream->common.get_buffer_size(&mStream->common);
         audio_format_t streamFormat = mStream->common.get_format(&mStream->common);
-        if (streamFormat == AUDIO_FORMAT_PCM_16_BIT) {
-            uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
-            audio_channel_mask_t channelMask =
-                    (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
-            mFormat = Format_from_SR_C(sampleRate, popcount(channelMask));
-            mBitShift = Format_frameBitShift(mFormat);
-        }
+        uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
+        audio_channel_mask_t channelMask =
+                (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
+        mFormat = Format_from_SR_C(sampleRate, popcount(channelMask), streamFormat);
+        mFrameSize = Format_frameSize(mFormat);
     }
     return NBAIO_Sink::negotiate(offers, numOffers, counterOffers, numCounterOffers);
 }
@@ -56,10 +54,10 @@
     if (!mNegotiated) {
         return NEGOTIATE;
     }
-    ALOG_ASSERT(mFormat != Format_Invalid);
-    ssize_t ret = mStream->write(mStream, buffer, count << mBitShift);
+    ALOG_ASSERT(Format_isValid(mFormat));
+    ssize_t ret = mStream->write(mStream, buffer, count * mFrameSize);
     if (ret > 0) {
-        ret >>= mBitShift;
+        ret /= mFrameSize;
         mFramesWritten += ret;
     } else {
         // FIXME verify HAL implementations are returning the correct error codes e.g. WOULD_BLOCK
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 3c61b60..4adf018 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -30,7 +30,24 @@
 
 namespace android {
 
-MonoPipe::MonoPipe(size_t reqFrames, NBAIO_Format format, bool writeCanBlock) :
+static uint64_t cacheN; // output of CCHelper::getLocalFreq()
+static bool cacheValid; // whether cacheN is valid
+static pthread_once_t cacheOnceControl = PTHREAD_ONCE_INIT;
+
+static void cacheOnceInit()
+{
+    CCHelper tmpHelper;
+    status_t res;
+    if (OK != (res = tmpHelper.getLocalFreq(&cacheN))) {
+        ALOGE("Failed to fetch local time frequency when constructing a"
+              " MonoPipe (res = %d).  getNextWriteTimestamp calls will be"
+              " non-functional", res);
+        return;
+    }
+    cacheValid = true;
+}
+
+MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock) :
         NBAIO_Sink(format),
         mUpdateSeq(0),
         mReqFrames(reqFrames),
@@ -47,8 +64,6 @@
         mTimestampMutator(&mTimestampShared),
         mTimestampObserver(&mTimestampShared)
 {
-    CCHelper tmpHelper;
-    status_t res;
     uint64_t N, D;
 
     mNextRdPTS = AudioBufferProvider::kInvalidPTS;
@@ -59,12 +74,13 @@
     mSamplesToLocalTime.a_to_b_denom = 0;
 
     D = Format_sampleRate(format);
-    if (OK != (res = tmpHelper.getLocalFreq(&N))) {
-        ALOGE("Failed to fetch local time frequency when constructing a"
-              " MonoPipe (res = %d).  getNextWriteTimestamp calls will be"
-              " non-functional", res);
+
+    (void) pthread_once(&cacheOnceControl, cacheOnceInit);
+    if (!cacheValid) {
+        // log has already been done
         return;
     }
+    N = cacheN;
 
     LinearTransform::reduce(&N, &D);
     static const uint64_t kSignedHiBitsMask   = ~(0x7FFFFFFFull);
@@ -115,11 +131,11 @@
             part1 = written;
         }
         if (CC_LIKELY(part1 > 0)) {
-            memcpy((char *) mBuffer + (rear << mBitShift), buffer, part1 << mBitShift);
+            memcpy((char *) mBuffer + (rear * mFrameSize), buffer, part1 * mFrameSize);
             if (CC_UNLIKELY(rear + part1 == mMaxFrames)) {
                 size_t part2 = written - part1;
                 if (CC_LIKELY(part2 > 0)) {
-                    memcpy(mBuffer, (char *) buffer + (part1 << mBitShift), part2 << mBitShift);
+                    memcpy(mBuffer, (char *) buffer + (part1 * mFrameSize), part2 * mFrameSize);
                 }
             }
             android_atomic_release_store(written + mRear, &mRear);
@@ -129,7 +145,7 @@
             break;
         }
         count -= written;
-        buffer = (char *) buffer + (written << mBitShift);
+        buffer = (char *) buffer + (written * mFrameSize);
         // Simulate blocking I/O by sleeping at different rates, depending on a throttle.
         // The throttle tries to keep the mean pipe depth near the setpoint, with a slight jitter.
         uint32_t ns;
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index 851341a..de82229 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -73,11 +73,11 @@
         part1 = red;
     }
     if (CC_LIKELY(part1 > 0)) {
-        memcpy(buffer, (char *) mPipe->mBuffer + (front << mBitShift), part1 << mBitShift);
+        memcpy(buffer, (char *) mPipe->mBuffer + (front * mFrameSize), part1 * mFrameSize);
         if (CC_UNLIKELY(front + part1 == mPipe->mMaxFrames)) {
             size_t part2 = red - part1;
             if (CC_LIKELY(part2 > 0)) {
-                memcpy((char *) buffer + (part1 << mBitShift), mPipe->mBuffer, part2 << mBitShift);
+                memcpy((char *) buffer + (part1 * mFrameSize), mPipe->mBuffer, part2 * mFrameSize);
             }
         }
         mPipe->updateFrontAndNRPTS(red + mPipe->mFront, nextReadPTS);
diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp
index e0d2c21..ff3284c 100644
--- a/media/libnbaio/NBAIO.cpp
+++ b/media/libnbaio/NBAIO.cpp
@@ -22,119 +22,42 @@
 
 namespace android {
 
-size_t Format_frameSize(NBAIO_Format format)
+size_t Format_frameSize(const NBAIO_Format& format)
 {
-    return Format_channelCount(format) * sizeof(short);
+    return format.mFrameSize;
 }
 
-size_t Format_frameBitShift(NBAIO_Format format)
+const NBAIO_Format Format_Invalid = { 0, 0, AUDIO_FORMAT_INVALID, 0 };
+
+unsigned Format_sampleRate(const NBAIO_Format& format)
 {
-    // sizeof(short) == 2, so frame size == 1 << channels
-    return Format_channelCount(format);
+    if (!Format_isValid(format)) {
+        return 0;
+    }
+    return format.mSampleRate;
 }
 
-enum {
-    Format_SR_8000,
-    Format_SR_11025,
-    Format_SR_16000,
-    Format_SR_22050,
-    Format_SR_24000,
-    Format_SR_32000,
-    Format_SR_44100,
-    Format_SR_48000,
-    Format_SR_Mask = 7
-};
-
-enum {
-    Format_C_1 = 0x08,
-    Format_C_2 = 0x10,
-    Format_C_Mask = 0x18
-};
-
-unsigned Format_sampleRate(NBAIO_Format format)
+unsigned Format_channelCount(const NBAIO_Format& format)
 {
-    if (format == Format_Invalid) {
+    if (!Format_isValid(format)) {
         return 0;
     }
-    switch (format & Format_SR_Mask) {
-    case Format_SR_8000:
-        return 8000;
-    case Format_SR_11025:
-        return 11025;
-    case Format_SR_16000:
-        return 16000;
-    case Format_SR_22050:
-        return 22050;
-    case Format_SR_24000:
-        return 24000;
-    case Format_SR_32000:
-        return 32000;
-    case Format_SR_44100:
-        return 44100;
-    case Format_SR_48000:
-        return 48000;
-    default:
-        return 0;
-    }
+    return format.mChannelCount;
 }
 
-unsigned Format_channelCount(NBAIO_Format format)
+NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount,
+        audio_format_t format)
 {
-    if (format == Format_Invalid) {
-        return 0;
-    }
-    switch (format & Format_C_Mask) {
-    case Format_C_1:
-        return 1;
-    case Format_C_2:
-        return 2;
-    default:
-        return 0;
-    }
-}
-
-NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount)
-{
-    NBAIO_Format format;
-    switch (sampleRate) {
-    case 8000:
-        format = Format_SR_8000;
-        break;
-    case 11025:
-        format = Format_SR_11025;
-        break;
-    case 16000:
-        format = Format_SR_16000;
-        break;
-    case 22050:
-        format = Format_SR_22050;
-        break;
-    case 24000:
-        format = Format_SR_24000;
-        break;
-    case 32000:
-        format = Format_SR_32000;
-        break;
-    case 44100:
-        format = Format_SR_44100;
-        break;
-    case 48000:
-        format = Format_SR_48000;
-        break;
-    default:
+    if (sampleRate == 0 || channelCount == 0 || !audio_is_valid_format(format)) {
         return Format_Invalid;
     }
-    switch (channelCount) {
-    case 1:
-        format |= Format_C_1;
-        break;
-    case 2:
-        format |= Format_C_2;
-        break;
-    default:
-        return Format_Invalid;
-    }
-    return format;
+    NBAIO_Format ret;
+    ret.mSampleRate = sampleRate;
+    ret.mChannelCount = channelCount;
+    ret.mFormat = format;
+    ret.mFrameSize = audio_is_linear_pcm(format) ?
+            channelCount * audio_bytes_per_sample(format) : sizeof(uint8_t);
+    return ret;
 }
 
 // This is a default implementation; it is expected that subclasses will optimize this.
@@ -216,9 +139,9 @@
 {
     ALOGV("negotiate offers=%p numOffers=%u countersOffers=%p numCounterOffers=%u",
             offers, numOffers, counterOffers, numCounterOffers);
-    if (mFormat != Format_Invalid) {
+    if (Format_isValid(mFormat)) {
         for (size_t i = 0; i < numOffers; ++i) {
-            if (offers[i] == mFormat) {
+            if (Format_isEqual(offers[i], mFormat)) {
                 mNegotiated = true;
                 return i;
             }
@@ -233,4 +156,17 @@
     return (ssize_t) NEGOTIATE;
 }
 
+bool Format_isValid(const NBAIO_Format& format)
+{
+    return format.mSampleRate != 0 && format.mChannelCount != 0 &&
+            format.mFormat != AUDIO_FORMAT_INVALID && format.mFrameSize != 0;
+}
+
+bool Format_isEqual(const NBAIO_Format& format1, const NBAIO_Format& format2)
+{
+    return format1.mSampleRate == format2.mSampleRate &&
+            format1.mChannelCount == format2.mChannelCount && format1.mFormat == format2.mFormat &&
+            format1.mFrameSize == format2.mFrameSize;
+}
+
 }   // namespace android
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index 8dfb4f0..4d9a1fa 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -341,8 +341,8 @@
     mFd = fd;
     mIndent = indent;
     String8 timestamp, body;
-    if (i > 0) {
-        lost += i;
+    lost += i;
+    if (lost > 0) {
         body.appendFormat("warning: lost %zu bytes worth of events", lost);
         // TODO timestamp empty here, only other choice to wait for the first timestamp event in the
         //      log to push it out.  Consider keeping the timestamp/body between calls to readAt().
@@ -447,7 +447,7 @@
 
 bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
 {
-    return iMemory.get() == mIMemory.get();
+    return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
 }
 
 }   // namespace android
diff --git a/media/libnbaio/Pipe.cpp b/media/libnbaio/Pipe.cpp
index 1c21f9c..28a034c 100644
--- a/media/libnbaio/Pipe.cpp
+++ b/media/libnbaio/Pipe.cpp
@@ -25,7 +25,7 @@
 
 namespace android {
 
-Pipe::Pipe(size_t maxFrames, NBAIO_Format format) :
+Pipe::Pipe(size_t maxFrames, const NBAIO_Format& format) :
         NBAIO_Sink(format),
         mMaxFrames(roundup(maxFrames)),
         mBuffer(malloc(mMaxFrames * Format_frameSize(format))),
@@ -52,13 +52,13 @@
     if (CC_LIKELY(written > count)) {
         written = count;
     }
-    memcpy((char *) mBuffer + (rear << mBitShift), buffer, written << mBitShift);
+    memcpy((char *) mBuffer + (rear * mFrameSize), buffer, written * mFrameSize);
     if (CC_UNLIKELY(rear + written == mMaxFrames)) {
         if (CC_UNLIKELY((count -= written) > rear)) {
             count = rear;
         }
         if (CC_LIKELY(count > 0)) {
-            memcpy(mBuffer, (char *) buffer + (written << mBitShift), count << mBitShift);
+            memcpy(mBuffer, (char *) buffer + (written * mFrameSize), count * mFrameSize);
             written += count;
         }
     }
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index d786b84..c8e4953 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -59,7 +59,7 @@
     return avail;
 }
 
-ssize_t PipeReader::read(void *buffer, size_t count, int64_t readPTS)
+ssize_t PipeReader::read(void *buffer, size_t count, int64_t readPTS __unused)
 {
     ssize_t avail = availableToRead();
     if (CC_UNLIKELY(avail <= 0)) {
@@ -76,14 +76,14 @@
         red = count;
     }
     // In particular, an overrun during the memcpy will result in reading corrupt data
-    memcpy(buffer, (char *) mPipe.mBuffer + (front << mBitShift), red << mBitShift);
+    memcpy(buffer, (char *) mPipe.mBuffer + (front * mFrameSize), red * mFrameSize);
     // We could re-read the rear pointer here to detect the corruption, but why bother?
     if (CC_UNLIKELY(front + red == mPipe.mMaxFrames)) {
         if (CC_UNLIKELY((count -= red) > front)) {
             count = front;
         }
         if (CC_LIKELY(count > 0)) {
-            memcpy((char *) buffer + (red << mBitShift), mPipe.mBuffer, count << mBitShift);
+            memcpy((char *) buffer + (red * mFrameSize), mPipe.mBuffer, count * mFrameSize);
             red += count;
         }
     }
diff --git a/media/libnbaio/SourceAudioBufferProvider.cpp b/media/libnbaio/SourceAudioBufferProvider.cpp
index 062fa0f..e21ef48 100644
--- a/media/libnbaio/SourceAudioBufferProvider.cpp
+++ b/media/libnbaio/SourceAudioBufferProvider.cpp
@@ -24,7 +24,7 @@
 
 SourceAudioBufferProvider::SourceAudioBufferProvider(const sp<NBAIO_Source>& source) :
     mSource(source),
-    // mFrameBitShiftFormat below
+    // mFrameSize below
     mAllocated(NULL), mSize(0), mOffset(0), mRemaining(0), mGetCount(0), mFramesReleased(0)
 {
     ALOG_ASSERT(source != 0);
@@ -37,7 +37,7 @@
     numCounterOffers = 0;
     index = source->negotiate(counterOffers, 1, NULL, numCounterOffers);
     ALOG_ASSERT(index == 0);
-    mFrameBitShift = Format_frameBitShift(source->format());
+    mFrameSize = Format_frameSize(source->format());
 }
 
 SourceAudioBufferProvider::~SourceAudioBufferProvider()
@@ -54,14 +54,14 @@
         if (mRemaining < buffer->frameCount) {
             buffer->frameCount = mRemaining;
         }
-        buffer->raw = (char *) mAllocated + (mOffset << mFrameBitShift);
+        buffer->raw = (char *) mAllocated + (mOffset * mFrameSize);
         mGetCount = buffer->frameCount;
         return OK;
     }
     // do we need to reallocate?
     if (buffer->frameCount > mSize) {
         free(mAllocated);
-        mAllocated = malloc(buffer->frameCount << mFrameBitShift);
+        mAllocated = malloc(buffer->frameCount * mFrameSize);
         mSize = buffer->frameCount;
     }
     // read from source
@@ -84,7 +84,7 @@
 void SourceAudioBufferProvider::releaseBuffer(Buffer *buffer)
 {
     ALOG_ASSERT((buffer != NULL) &&
-            (buffer->raw == (char *) mAllocated + (mOffset << mFrameBitShift)) &&
+            (buffer->raw == (char *) mAllocated + (mOffset * mFrameSize)) &&
             (buffer->frameCount <= mGetCount) &&
             (mGetCount <= mRemaining) &&
             (mOffset + mRemaining <= mSize));
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 08a3c7f..5bca317 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -38,6 +38,7 @@
 #include <media/hardware/HardwareAPI.h>
 
 #include <OMX_AudioExt.h>
+#include <OMX_VideoExt.h>
 #include <OMX_Component.h>
 #include <OMX_IndexExt.h>
 
@@ -367,7 +368,7 @@
       mIsEncoder(false),
       mUseMetadataOnEncoderOutput(false),
       mShutdownInProgress(false),
-      mIsConfiguredForAdaptivePlayback(false),
+      mExplicitShutdown(false),
       mEncoderDelay(0),
       mEncoderPadding(0),
       mChannelMaskPresent(false),
@@ -376,7 +377,10 @@
       mStoreMetaDataInOutputBuffers(false),
       mMetaDataBuffersToSubmit(0),
       mRepeatFrameDelayUs(-1ll),
-      mMaxPtsGapUs(-1l) {
+      mMaxPtsGapUs(-1ll),
+      mTimePerCaptureUs(-1ll),
+      mTimePerFrameUs(-1ll),
+      mCreateInputBuffersSuspended(false) {
     mUninitializedState = new UninitializedState(this);
     mLoadedState = new LoadedState(this);
     mLoadedToIdleState = new LoadedToIdleState(this);
@@ -642,18 +646,34 @@
         return err;
     }
 
-    // XXX: Is this the right logic to use?  It's not clear to me what the OMX
-    // buffer counts refer to - how do they account for the renderer holding on
-    // to buffers?
-    if (def.nBufferCountActual < def.nBufferCountMin + *minUndequeuedBuffers) {
-        OMX_U32 newBufferCount = def.nBufferCountMin + *minUndequeuedBuffers;
+    // FIXME: assume that surface is controlled by app (native window
+    // returns the number for the case when surface is not controlled by app)
+    // FIXME2: This means that minUndequeuedBufs can be 1 larger than reported
+    // For now, try to allocate 1 more buffer, but don't fail if unsuccessful
+
+    // Use conservative allocation while also trying to reduce starvation
+    //
+    // 1. allocate at least nBufferCountMin + minUndequeuedBuffers - that is the
+    //    minimum needed for the consumer to be able to work
+    // 2. try to allocate two (2) additional buffers to reduce starvation from
+    //    the consumer
+    //    plus an extra buffer to account for incorrect minUndequeuedBufs
+    for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
+        OMX_U32 newBufferCount =
+            def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
         def.nBufferCountActual = newBufferCount;
         err = mOMX->setParameter(
                 mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
 
-        if (err != OK) {
-            ALOGE("[%s] setting nBufferCountActual to %lu failed: %d",
-                    mComponentName.c_str(), newBufferCount, err);
+        if (err == OK) {
+            *minUndequeuedBuffers += extraBuffers;
+            break;
+        }
+
+        ALOGW("[%s] setting nBufferCountActual to %lu failed: %d",
+                mComponentName.c_str(), newBufferCount, err);
+        /* exit condition */
+        if (extraBuffers == 0) {
             return err;
         }
     }
@@ -678,6 +698,7 @@
             &bufferCount, &bufferSize, &minUndequeuedBuffers);
     if (err != 0)
         return err;
+    mNumUndequeuedBuffers = minUndequeuedBuffers;
 
     ALOGV("[%s] Allocating %lu buffers from a native window of size %lu on "
          "output port",
@@ -743,6 +764,7 @@
             &bufferCount, &bufferSize, &minUndequeuedBuffers);
     if (err != 0)
         return err;
+    mNumUndequeuedBuffers = minUndequeuedBuffers;
 
     ALOGV("[%s] Allocating %lu meta buffers on output port",
          mComponentName.c_str(), bufferCount);
@@ -963,6 +985,8 @@
             "audio_decoder.aac", "audio_encoder.aac" },
         { MEDIA_MIMETYPE_AUDIO_VORBIS,
             "audio_decoder.vorbis", "audio_encoder.vorbis" },
+        { MEDIA_MIMETYPE_AUDIO_OPUS,
+            "audio_decoder.opus", "audio_encoder.opus" },
         { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
             "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
         { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
@@ -1038,6 +1062,9 @@
         encoder = false;
     }
 
+    sp<AMessage> inputFormat = new AMessage();
+    sp<AMessage> outputFormat = new AMessage();
+
     mIsEncoder = encoder;
 
     status_t err = setComponentRole(encoder /* isEncoder */, mime);
@@ -1120,7 +1147,17 @@
         }
 
         if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
-            mMaxPtsGapUs = -1l;
+            mMaxPtsGapUs = -1ll;
+        }
+
+        if (!msg->findInt64("time-lapse", &mTimePerCaptureUs)) {
+            mTimePerCaptureUs = -1ll;
+        }
+
+        if (!msg->findInt32(
+                    "create-input-buffers-suspended",
+                    (int32_t*)&mCreateInputBuffersSuspended)) {
+            mCreateInputBuffersSuspended = false;
         }
     }
 
@@ -1129,7 +1166,9 @@
     int32_t haveNativeWindow = msg->findObject("native-window", &obj) &&
             obj != NULL;
     mStoreMetaDataInOutputBuffers = false;
-    mIsConfiguredForAdaptivePlayback = false;
+    if (video && !encoder) {
+        inputFormat->setInt32("adaptive-playback", false);
+    }
     if (!encoder && video && haveNativeWindow) {
         err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, OMX_TRUE);
         if (err != OK) {
@@ -1174,14 +1213,19 @@
                 ALOGW_IF(err != OK,
                         "[%s] prepareForAdaptivePlayback failed w/ err %d",
                         mComponentName.c_str(), err);
-                mIsConfiguredForAdaptivePlayback = (err == OK);
+
+                if (err == OK) {
+                    inputFormat->setInt32("max-width", maxWidth);
+                    inputFormat->setInt32("max-height", maxHeight);
+                    inputFormat->setInt32("adaptive-playback", true);
+                }
             }
             // allow failure
             err = OK;
         } else {
             ALOGV("[%s] storeMetaDataInBuffers succeeded", mComponentName.c_str());
             mStoreMetaDataInOutputBuffers = true;
-            mIsConfiguredForAdaptivePlayback = true;
+            inputFormat->setInt32("adaptive-playback", true);
         }
 
         int32_t push;
@@ -1321,6 +1365,11 @@
         err = setMinBufferSize(kPortIndexInput, 8192);  // XXX
     }
 
+    CHECK_EQ(getPortFormat(kPortIndexInput, inputFormat), (status_t)OK);
+    CHECK_EQ(getPortFormat(kPortIndexOutput, outputFormat), (status_t)OK);
+    mInputFormat = inputFormat;
+    mOutputFormat = outputFormat;
+
     return err;
 }
 
@@ -1911,6 +1960,7 @@
             return INVALID_OPERATION;
         }
         frameRate = (float)tmp;
+        mTimePerFrameUs = (int64_t) (1000000.0f / frameRate);
     }
 
     video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
@@ -2323,12 +2373,81 @@
 
 status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg) {
     int32_t bitrate;
+    int32_t iFrameInterval = 0;
+    size_t tsLayers = 0;
+    OMX_VIDEO_ANDROID_VPXTEMPORALLAYERPATTERNTYPE pattern =
+        OMX_VIDEO_VPXTemporalLayerPatternNone;
+    static const uint32_t kVp8LayerRateAlloction
+        [OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS]
+        [OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS] = {
+        {100, 100, 100},  // 1 layer
+        { 60, 100, 100},  // 2 layers {60%, 40%}
+        { 40,  60, 100},  // 3 layers {40%, 20%, 40%}
+    };
     if (!msg->findInt32("bitrate", &bitrate)) {
         return INVALID_OPERATION;
     }
+    msg->findInt32("i-frame-interval", &iFrameInterval);
 
     OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
 
+    float frameRate;
+    if (!msg->findFloat("frame-rate", &frameRate)) {
+        int32_t tmp;
+        if (!msg->findInt32("frame-rate", &tmp)) {
+            return INVALID_OPERATION;
+        }
+        frameRate = (float)tmp;
+    }
+
+    AString tsSchema;
+    if (msg->findString("ts-schema", &tsSchema)) {
+        if (tsSchema == "webrtc.vp8.1-layer") {
+            pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
+            tsLayers = 1;
+        } else if (tsSchema == "webrtc.vp8.2-layer") {
+            pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
+            tsLayers = 2;
+        } else if (tsSchema == "webrtc.vp8.3-layer") {
+            pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
+            tsLayers = 3;
+        } else {
+            ALOGW("Unsupported ts-schema [%s]", tsSchema.c_str());
+        }
+    }
+
+    OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
+    InitOMXParams(&vp8type);
+    vp8type.nPortIndex = kPortIndexOutput;
+    status_t err = mOMX->getParameter(
+            mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+            &vp8type, sizeof(vp8type));
+
+    if (err == OK) {
+        if (iFrameInterval > 0) {
+            vp8type.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate);
+        }
+        vp8type.eTemporalPattern = pattern;
+        vp8type.nTemporalLayerCount = tsLayers;
+        if (tsLayers > 0) {
+            for (size_t i = 0; i < OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS; i++) {
+                vp8type.nTemporalLayerBitrateRatio[i] =
+                    kVp8LayerRateAlloction[tsLayers - 1][i];
+            }
+        }
+        if (bitrateMode == OMX_Video_ControlRateConstant) {
+            vp8type.nMinQuantizer = 2;
+            vp8type.nMaxQuantizer = 63;
+        }
+
+        err = mOMX->setParameter(
+                mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+                &vp8type, sizeof(vp8type));
+        if (err != OK) {
+            ALOGW("Extended VP8 parameters set failed: %d", err);
+        }
+    }
+
     return configureBitrate(bitrate, bitrateMode);
 }
 
@@ -2484,19 +2603,7 @@
         return;
     }
 
-    int minUndequeuedBufs = 0;
-    status_t err = mNativeWindow->query(
-            mNativeWindow.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
-            &minUndequeuedBufs);
-
-    if (err != OK) {
-        ALOGE("[%s] NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
-                mComponentName.c_str(), strerror(-err), -err);
-
-        minUndequeuedBufs = 0;
-    }
-
-    while (countBuffersOwnedByNativeWindow() > (size_t)minUndequeuedBufs
+    while (countBuffersOwnedByNativeWindow() > mNumUndequeuedBuffers
             && dequeueBufferFromNativeWindow() != NULL) {
         // these buffers will be submitted as regular buffers; account for this
         if (mStoreMetaDataInOutputBuffers && mMetaDataBuffersToSubmit > 0) {
@@ -2542,79 +2649,78 @@
     }
 }
 
-void ACodec::sendFormatChange(const sp<AMessage> &reply) {
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", kWhatOutputFormatChanged);
-
+status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
+    // TODO: catch errors and return them instead of using CHECK
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
-    def.nPortIndex = kPortIndexOutput;
+    def.nPortIndex = portIndex;
 
     CHECK_EQ(mOMX->getParameter(
                 mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)),
              (status_t)OK);
 
-    CHECK_EQ((int)def.eDir, (int)OMX_DirOutput);
+    CHECK_EQ((int)def.eDir,
+            (int)(portIndex == kPortIndexOutput ? OMX_DirOutput : OMX_DirInput));
 
     switch (def.eDomain) {
         case OMX_PortDomainVideo:
         {
             OMX_VIDEO_PORTDEFINITIONTYPE *videoDef = &def.format.video;
+            switch ((int)videoDef->eCompressionFormat) {
+                case OMX_VIDEO_CodingUnused:
+                {
+                    CHECK(mIsEncoder ^ (portIndex == kPortIndexOutput));
+                    notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW);
 
-            AString mime;
-            if (!mIsEncoder) {
-                notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW);
-            } else if (GetMimeTypeForVideoCoding(
+                    notify->setInt32("stride", videoDef->nStride);
+                    notify->setInt32("slice-height", videoDef->nSliceHeight);
+                    notify->setInt32("color-format", videoDef->eColorFormat);
+
+                    OMX_CONFIG_RECTTYPE rect;
+                    InitOMXParams(&rect);
+                    rect.nPortIndex = kPortIndexOutput;
+
+                    if (mOMX->getConfig(
+                                mNode, OMX_IndexConfigCommonOutputCrop,
+                                &rect, sizeof(rect)) != OK) {
+                        rect.nLeft = 0;
+                        rect.nTop = 0;
+                        rect.nWidth = videoDef->nFrameWidth;
+                        rect.nHeight = videoDef->nFrameHeight;
+                    }
+
+                    CHECK_GE(rect.nLeft, 0);
+                    CHECK_GE(rect.nTop, 0);
+                    CHECK_GE(rect.nWidth, 0u);
+                    CHECK_GE(rect.nHeight, 0u);
+                    CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth);
+                    CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight);
+
+                    notify->setRect(
+                            "crop",
+                            rect.nLeft,
+                            rect.nTop,
+                            rect.nLeft + rect.nWidth - 1,
+                            rect.nTop + rect.nHeight - 1);
+
+                    break;
+                }
+                default:
+                {
+                    CHECK(mIsEncoder ^ (portIndex == kPortIndexInput));
+                    AString mime;
+                    if (GetMimeTypeForVideoCoding(
                         videoDef->eCompressionFormat, &mime) != OK) {
-                notify->setString("mime", "application/octet-stream");
-            } else {
-                notify->setString("mime", mime.c_str());
+                        notify->setString("mime", "application/octet-stream");
+                    } else {
+                        notify->setString("mime", mime.c_str());
+                    }
+                    break;
+                }
             }
 
             notify->setInt32("width", videoDef->nFrameWidth);
             notify->setInt32("height", videoDef->nFrameHeight);
-
-            if (!mIsEncoder) {
-                notify->setInt32("stride", videoDef->nStride);
-                notify->setInt32("slice-height", videoDef->nSliceHeight);
-                notify->setInt32("color-format", videoDef->eColorFormat);
-
-                OMX_CONFIG_RECTTYPE rect;
-                InitOMXParams(&rect);
-                rect.nPortIndex = kPortIndexOutput;
-
-                if (mOMX->getConfig(
-                            mNode, OMX_IndexConfigCommonOutputCrop,
-                            &rect, sizeof(rect)) != OK) {
-                    rect.nLeft = 0;
-                    rect.nTop = 0;
-                    rect.nWidth = videoDef->nFrameWidth;
-                    rect.nHeight = videoDef->nFrameHeight;
-                }
-
-                CHECK_GE(rect.nLeft, 0);
-                CHECK_GE(rect.nTop, 0);
-                CHECK_GE(rect.nWidth, 0u);
-                CHECK_GE(rect.nHeight, 0u);
-                CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth);
-                CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight);
-
-                notify->setRect(
-                        "crop",
-                        rect.nLeft,
-                        rect.nTop,
-                        rect.nLeft + rect.nWidth - 1,
-                        rect.nTop + rect.nHeight - 1);
-
-                if (mNativeWindow != NULL) {
-                    reply->setRect(
-                            "crop",
-                            rect.nLeft,
-                            rect.nTop,
-                            rect.nLeft + rect.nWidth,
-                            rect.nTop + rect.nHeight);
-                }
-            }
             break;
         }
 
@@ -2627,7 +2733,7 @@
                 {
                     OMX_AUDIO_PARAM_PCMMODETYPE params;
                     InitOMXParams(&params);
-                    params.nPortIndex = kPortIndexOutput;
+                    params.nPortIndex = portIndex;
 
                     CHECK_EQ(mOMX->getParameter(
                                 mNode, OMX_IndexParamAudioPcm,
@@ -2647,20 +2753,6 @@
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
                     notify->setInt32("channel-count", params.nChannels);
                     notify->setInt32("sample-rate", params.nSamplingRate);
-                    if (mEncoderDelay + mEncoderPadding) {
-                        size_t frameSize = params.nChannels * sizeof(int16_t);
-                        if (mSkipCutBuffer != NULL) {
-                            size_t prevbufsize = mSkipCutBuffer->size();
-                            if (prevbufsize != 0) {
-                                ALOGW("Replacing SkipCutBuffer holding %d "
-                                      "bytes",
-                                      prevbufsize);
-                            }
-                        }
-                        mSkipCutBuffer = new SkipCutBuffer(
-                                mEncoderDelay * frameSize,
-                                mEncoderPadding * frameSize);
-                    }
 
                     if (mChannelMaskPresent) {
                         notify->setInt32("channel-mask", mChannelMask);
@@ -2672,7 +2764,7 @@
                 {
                     OMX_AUDIO_PARAM_AACPROFILETYPE params;
                     InitOMXParams(&params);
-                    params.nPortIndex = kPortIndexOutput;
+                    params.nPortIndex = portIndex;
 
                     CHECK_EQ(mOMX->getParameter(
                                 mNode, OMX_IndexParamAudioAac,
@@ -2689,7 +2781,7 @@
                 {
                     OMX_AUDIO_PARAM_AMRTYPE params;
                     InitOMXParams(&params);
-                    params.nPortIndex = kPortIndexOutput;
+                    params.nPortIndex = portIndex;
 
                     CHECK_EQ(mOMX->getParameter(
                                 mNode, OMX_IndexParamAudioAmr,
@@ -2715,7 +2807,7 @@
                 {
                     OMX_AUDIO_PARAM_FLACTYPE params;
                     InitOMXParams(&params);
-                    params.nPortIndex = kPortIndexOutput;
+                    params.nPortIndex = portIndex;
 
                     CHECK_EQ(mOMX->getParameter(
                                 mNode, OMX_IndexParamAudioFlac,
@@ -2728,11 +2820,45 @@
                     break;
                 }
 
+                case OMX_AUDIO_CodingMP3:
+                {
+                    OMX_AUDIO_PARAM_MP3TYPE params;
+                    InitOMXParams(&params);
+                    params.nPortIndex = portIndex;
+
+                    CHECK_EQ(mOMX->getParameter(
+                                mNode, OMX_IndexParamAudioMp3,
+                                &params, sizeof(params)),
+                             (status_t)OK);
+
+                    notify->setString("mime", MEDIA_MIMETYPE_AUDIO_MPEG);
+                    notify->setInt32("channel-count", params.nChannels);
+                    notify->setInt32("sample-rate", params.nSampleRate);
+                    break;
+                }
+
+                case OMX_AUDIO_CodingVORBIS:
+                {
+                    OMX_AUDIO_PARAM_VORBISTYPE params;
+                    InitOMXParams(&params);
+                    params.nPortIndex = portIndex;
+
+                    CHECK_EQ(mOMX->getParameter(
+                                mNode, OMX_IndexParamAudioVorbis,
+                                &params, sizeof(params)),
+                             (status_t)OK);
+
+                    notify->setString("mime", MEDIA_MIMETYPE_AUDIO_VORBIS);
+                    notify->setInt32("channel-count", params.nChannels);
+                    notify->setInt32("sample-rate", params.nSampleRate);
+                    break;
+                }
+
                 case OMX_AUDIO_CodingAndroidAC3:
                 {
                     OMX_AUDIO_PARAM_ANDROID_AC3TYPE params;
                     InitOMXParams(&params);
-                    params.nPortIndex = kPortIndexOutput;
+                    params.nPortIndex = portIndex;
 
                     CHECK_EQ((status_t)OK, mOMX->getParameter(
                             mNode,
@@ -2747,6 +2873,7 @@
                 }
 
                 default:
+                    ALOGE("UNKNOWN AUDIO CODING: %d\n", audioDef->eEncoding);
                     TRESPASS();
             }
             break;
@@ -2756,6 +2883,43 @@
             TRESPASS();
     }
 
+    return OK;
+}
+
+void ACodec::sendFormatChange(const sp<AMessage> &reply) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatOutputFormatChanged);
+
+    CHECK_EQ(getPortFormat(kPortIndexOutput, notify), (status_t)OK);
+
+    AString mime;
+    CHECK(notify->findString("mime", &mime));
+
+    int32_t left, top, right, bottom;
+    if (mime == MEDIA_MIMETYPE_VIDEO_RAW &&
+        mNativeWindow != NULL &&
+        notify->findRect("crop", &left, &top, &right, &bottom)) {
+        // notify renderer of the crop change
+        // NOTE: native window uses extended right-bottom coordinate
+        reply->setRect("crop", left, top, right + 1, bottom + 1);
+    } else if (mime == MEDIA_MIMETYPE_AUDIO_RAW &&
+               (mEncoderDelay || mEncoderPadding)) {
+        int32_t channelCount;
+        CHECK(notify->findInt32("channel-count", &channelCount));
+        size_t frameSize = channelCount * sizeof(int16_t);
+        if (mSkipCutBuffer != NULL) {
+            size_t prevbufsize = mSkipCutBuffer->size();
+            if (prevbufsize != 0) {
+                ALOGW("Replacing SkipCutBuffer holding %d "
+                      "bytes",
+                      prevbufsize);
+            }
+        }
+        mSkipCutBuffer = new SkipCutBuffer(
+                mEncoderDelay * frameSize,
+                mEncoderPadding * frameSize);
+    }
+
     notify->post();
 
     mSentFormat = true;
@@ -2962,7 +3126,8 @@
       mCodec(codec) {
 }
 
-ACodec::BaseState::PortMode ACodec::BaseState::getPortMode(OMX_U32 portIndex) {
+ACodec::BaseState::PortMode ACodec::BaseState::getPortMode(
+        OMX_U32 /* portIndex */) {
     return KEEP_BUFFERS;
 }
 
@@ -3011,6 +3176,14 @@
     int32_t type;
     CHECK(msg->findInt32("type", &type));
 
+    // there is a possibility that this is an outstanding message for a
+    // codec that we have already destroyed
+    if (mCodec->mNode == NULL) {
+        ALOGI("ignoring message as already freed component: %s",
+                msg->debugString().c_str());
+        return true;
+    }
+
     IOMX::node_id nodeID;
     CHECK(msg->findPointer("node", &nodeID));
     CHECK_EQ(nodeID, mCodec->mNode);
@@ -3371,8 +3544,8 @@
         size_t rangeOffset, size_t rangeLength,
         OMX_U32 flags,
         int64_t timeUs,
-        void *platformPrivate,
-        void *dataPtr) {
+        void * /* platformPrivate */,
+        void * /* dataPtr */) {
     ALOGV("[%s] onOMXFillBufferDone %p time %lld us, flags = 0x%08lx",
          mCodec->mComponentName.c_str(), bufferID, timeUs, flags);
 
@@ -3424,7 +3597,7 @@
             sp<AMessage> reply =
                 new AMessage(kWhatOutputBufferDrained, mCodec->id());
 
-            if (!mCodec->mSentFormat) {
+            if (!mCodec->mSentFormat && rangeLength > 0) {
                 mCodec->sendFormatChange(reply);
             }
 
@@ -3627,7 +3800,8 @@
             int32_t keepComponentAllocated;
             CHECK(msg->findInt32(
                         "keepComponentAllocated", &keepComponentAllocated));
-            CHECK(!keepComponentAllocated);
+            ALOGW_IF(keepComponentAllocated,
+                     "cannot keep component allocated on shutdown in Uninitialized state");
 
             sp<AMessage> notify = mCodec->mNotify->dup();
             notify->setInt32("what", ACodec::kWhatShutdownCompleted);
@@ -3789,7 +3963,8 @@
     mCodec->mDequeueCounter = 0;
     mCodec->mMetaDataBuffersToSubmit = 0;
     mCodec->mRepeatFrameDelayUs = -1ll;
-    mCodec->mIsConfiguredForAdaptivePlayback = false;
+    mCodec->mInputFormat.clear();
+    mCodec->mOutputFormat.clear();
 
     if (mCodec->mShutdownInProgress) {
         bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
@@ -3799,6 +3974,7 @@
 
         onShutdown(keepComponentAllocated);
     }
+    mCodec->mExplicitShutdown = false;
 }
 
 void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
@@ -3808,9 +3984,12 @@
         mCodec->changeState(mCodec->mUninitializedState);
     }
 
-    sp<AMessage> notify = mCodec->mNotify->dup();
-    notify->setInt32("what", ACodec::kWhatShutdownCompleted);
-    notify->post();
+    if (mCodec->mExplicitShutdown) {
+        sp<AMessage> notify = mCodec->mNotify->dup();
+        notify->setInt32("what", ACodec::kWhatShutdownCompleted);
+        notify->post();
+        mCodec->mExplicitShutdown = false;
+    }
 }
 
 bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
@@ -3844,6 +4023,7 @@
             CHECK(msg->findInt32(
                         "keepComponentAllocated", &keepComponentAllocated));
 
+            mCodec->mExplicitShutdown = true;
             onShutdown(keepComponentAllocated);
 
             handled = true;
@@ -3903,6 +4083,8 @@
     {
         sp<AMessage> notify = mCodec->mNotify->dup();
         notify->setInt32("what", ACodec::kWhatComponentConfigured);
+        notify->setMessage("input-format", mCodec->mInputFormat);
+        notify->setMessage("output-format", mCodec->mOutputFormat);
         notify->post();
     }
 
@@ -3910,7 +4092,7 @@
 }
 
 void ACodec::LoadedState::onCreateInputSurface(
-        const sp<AMessage> &msg) {
+        const sp<AMessage> & /* msg */) {
     ALOGV("onCreateInputSurface");
 
     sp<AMessage> notify = mCodec->mNotify->dup();
@@ -3938,7 +4120,7 @@
         }
     }
 
-    if (err == OK && mCodec->mMaxPtsGapUs > 0l) {
+    if (err == OK && mCodec->mMaxPtsGapUs > 0ll) {
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
                 kPortIndexInput,
@@ -3948,6 +4130,41 @@
 
         if (err != OK) {
             ALOGE("[%s] Unable to configure max timestamp gap (err %d)",
+                    mCodec->mComponentName.c_str(),
+                    err);
+        }
+    }
+
+    if (err == OK && mCodec->mTimePerCaptureUs > 0ll
+            && mCodec->mTimePerFrameUs > 0ll) {
+        int64_t timeLapse[2];
+        timeLapse[0] = mCodec->mTimePerFrameUs;
+        timeLapse[1] = mCodec->mTimePerCaptureUs;
+        err = mCodec->mOMX->setInternalOption(
+                mCodec->mNode,
+                kPortIndexInput,
+                IOMX::INTERNAL_OPTION_TIME_LAPSE,
+                &timeLapse[0],
+                sizeof(timeLapse));
+
+        if (err != OK) {
+            ALOGE("[%s] Unable to configure time lapse (err %d)",
+                    mCodec->mComponentName.c_str(),
+                    err);
+        }
+    }
+
+    if (err == OK && mCodec->mCreateInputBuffersSuspended) {
+        bool suspend = true;
+        err = mCodec->mOMX->setInternalOption(
+                mCodec->mNode,
+                kPortIndexInput,
+                IOMX::INTERNAL_OPTION_SUSPEND,
+                &suspend,
+                sizeof(suspend));
+
+        if (err != OK) {
+            ALOGE("[%s] Unable to configure option to suspend (err %d)",
                   mCodec->mComponentName.c_str(),
                   err);
         }
@@ -4010,6 +4227,7 @@
 
 bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
+        case kWhatSetParameters:
         case kWhatShutdown:
         {
             mCodec->deferMessage(msg);
@@ -4076,6 +4294,7 @@
 
 bool ACodec::IdleToExecutingState::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
+        case kWhatSetParameters:
         case kWhatShutdown:
         {
             mCodec->deferMessage(msg);
@@ -4136,7 +4355,7 @@
 }
 
 ACodec::BaseState::PortMode ACodec::ExecutingState::getPortMode(
-        OMX_U32 portIndex) {
+        OMX_U32 /* portIndex */) {
     return RESUBMIT_BUFFERS;
 }
 
@@ -4224,6 +4443,7 @@
                         "keepComponentAllocated", &keepComponentAllocated));
 
             mCodec->mShutdownInProgress = true;
+            mCodec->mExplicitShutdown = true;
             mCodec->mKeepComponentAllocated = keepComponentAllocated;
 
             mActive = false;
@@ -4345,6 +4565,22 @@
         }
     }
 
+    int64_t skipFramesBeforeUs;
+    if (params->findInt64("skip-frames-before", &skipFramesBeforeUs)) {
+        status_t err =
+            mOMX->setInternalOption(
+                     mNode,
+                     kPortIndexInput,
+                     IOMX::INTERNAL_OPTION_START_TIME,
+                     &skipFramesBeforeUs,
+                     sizeof(skipFramesBeforeUs));
+
+        if (err != OK) {
+            ALOGE("Failed to set parameter 'skip-frames-before' (err %d)", err);
+            return err;
+        }
+    }
+
     int32_t dropInputFrames;
     if (params->findInt32("drop-input-frames", &dropInputFrames)) {
         bool suspend = dropInputFrames != 0;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 6a2a696..714b5e0 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -15,6 +15,7 @@
         CameraSource.cpp                  \
         CameraSourceTimeLapse.cpp         \
         DataSource.cpp                    \
+        DataURISource.cpp                 \
         DRMExtractor.cpp                  \
         ESDS.cpp                          \
         FileSource.cpp                    \
@@ -30,8 +31,10 @@
         MediaBufferGroup.cpp              \
         MediaCodec.cpp                    \
         MediaCodecList.cpp                \
+        MediaCodecSource.cpp              \
         MediaDefs.cpp                     \
         MediaExtractor.cpp                \
+        http/MediaHTTP.cpp                \
         MediaMuxer.cpp                    \
         MediaSource.cpp                   \
         MetaData.cpp                      \
@@ -55,8 +58,6 @@
         WVMExtractor.cpp                  \
         XINGSeeker.cpp                    \
         avc_utils.cpp                     \
-        mp4/FragmentedMP4Parser.cpp       \
-        mp4/TrackFragment.cpp             \
 
 LOCAL_C_INCLUDES:= \
         $(TOP)/frameworks/av/include/media/stagefright/timedtext \
@@ -80,6 +81,7 @@
         libicuuc \
         liblog \
         libmedia \
+        libopus \
         libsonivox \
         libssl \
         libstagefright_omx \
@@ -95,6 +97,7 @@
         libstagefright_color_conversion \
         libstagefright_aacenc \
         libstagefright_matroska \
+        libstagefright_webm \
         libstagefright_timedtext \
         libvpx \
         libwebm \
@@ -103,13 +106,6 @@
         libFLAC \
         libmedia_helper
 
-LOCAL_SRC_FILES += \
-        chromium_http_stub.cpp
-LOCAL_CPPFLAGS += -DCHROMIUM_AVAILABLE=1
-
-LOCAL_SHARED_LIBRARIES += libstlport
-include external/stlport/libstlport.mk
-
 LOCAL_SHARED_LIBRARIES += \
         libstagefright_enc_common \
         libstagefright_avc_common \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 8623100..2669849 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -221,7 +221,8 @@
 
         mAudioTrack = new AudioTrack(
                 AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
-                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
+                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+                0 /*notificationFrames*/);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
             mAudioTrack.clear();
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index e68a710..d0e0e8e 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -65,7 +65,7 @@
     if (status == OK) {
         // make sure that the AudioRecord callback never returns more than the maximum
         // buffer size
-        int frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;
+        uint32_t frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;
 
         // make sure that the AudioRecord total buffer size is large enough
         size_t bufCount = 2;
@@ -76,10 +76,10 @@
         mRecord = new AudioRecord(
                     inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                     audio_channel_in_mask_from_count(channelCount),
-                    bufCount * frameCount,
+                    (size_t) (bufCount * frameCount),
                     AudioRecordCallbackFunction,
                     this,
-                    frameCount);
+                    frameCount /*notificationFrames*/);
         mInitCheck = mRecord->initCheck();
     } else {
         mInitCheck = status;
@@ -278,7 +278,7 @@
 
     // Drop retrieved and previously lost audio data.
     if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
-        mRecord->getInputFramesLost();
+        (void) mRecord->getInputFramesLost();
         ALOGV("Drop audio data at %lld/%lld us", timeUs, mStartTimeUs);
         return OK;
     }
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 0dd867c..6e5003f 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -35,6 +35,8 @@
 
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -45,6 +47,7 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaHTTP.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
@@ -277,15 +280,20 @@
 }
 
 status_t AwesomePlayer::setDataSource(
-        const char *uri, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *uri,
+        const KeyedVector<String8, String8> *headers) {
     Mutex::Autolock autoLock(mLock);
-    return setDataSource_l(uri, headers);
+    return setDataSource_l(httpService, uri, headers);
 }
 
 status_t AwesomePlayer::setDataSource_l(
-        const char *uri, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *uri,
+        const KeyedVector<String8, String8> *headers) {
     reset_l();
 
+    mHTTPService = httpService;
     mUri = uri;
 
     if (headers) {
@@ -302,7 +310,7 @@
         }
     }
 
-    ALOGI("setDataSource_l(URL suppressed)");
+    ALOGI("setDataSource_l(%s)", uriDebugString(mUri, mFlags & INCOGNITO).c_str());
 
     // The actual work will be done during preparation in the call to
     // ::finishSetDataSource_l to avoid blocking the calling thread in
@@ -582,6 +590,7 @@
     mSeekNotificationSent = true;
     mSeekTimeUs = 0;
 
+    mHTTPService.clear();
     mUri.setTo("");
     mUriHeaders.clear();
 
@@ -709,11 +718,9 @@
                 finishAsyncPrepare_l();
             }
         } else {
-            int64_t bitrate;
-            if (getBitrate(&bitrate)) {
-                size_t cachedSize = mCachedSource->cachedSize();
-                int64_t cachedDurationUs = cachedSize * 8000000ll / bitrate;
-
+            bool eos2;
+            int64_t cachedDurationUs;
+            if (getCachedDuration_l(&cachedDurationUs, &eos2) && mDurationUs > 0) {
                 int percentage = 100.0 * (double)cachedDurationUs / mDurationUs;
                 if (percentage > 100) {
                     percentage = 100;
@@ -721,7 +728,7 @@
 
                 notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage);
             } else {
-                // We don't know the bitrate of the stream, use absolute size
+                // We don't know the bitrate/duration of the stream, use absolute size
                 // limits to maintain the cache.
 
                 if ((mFlags & PLAYING) && !eos
@@ -1483,7 +1490,7 @@
     CHECK(source != NULL);
 
     if (mTextDriver == NULL) {
-        mTextDriver = new TimedTextDriver(mListener);
+        mTextDriver = new TimedTextDriver(mListener, mHTTPService);
     }
 
     mTextDriver->addInBandTextSource(trackIndex, source);
@@ -2193,15 +2200,14 @@
     if (!strncasecmp("http://", mUri.string(), 7)
             || !strncasecmp("https://", mUri.string(), 8)
             || isWidevineStreaming) {
-        mConnectingDataSource = HTTPBase::Create(
-                (mFlags & INCOGNITO)
-                    ? HTTPBase::kFlagIncognito
-                    : 0);
-
-        if (mUIDValid) {
-            mConnectingDataSource->setUID(mUID);
+        if (mHTTPService == NULL) {
+            ALOGE("Attempt to play media from http URI without HTTP service.");
+            return UNKNOWN_ERROR;
         }
 
+        sp<IMediaHTTPConnection> conn = mHTTPService->makeHTTPConnection();
+        mConnectingDataSource = new MediaHTTP(conn);
+
         String8 cacheConfig;
         bool disconnectAtHighwatermark;
         NuCachedSource2::RemoveCacheSpecificHeaders(
@@ -2209,6 +2215,10 @@
 
         mLock.unlock();
         status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders);
+        // force connection at this point, to avoid a race condition between getMIMEType and the
+        // caching datasource constructed below, which could result in multiple requests to the
+        // server, and/or failed connections.
+        String8 contentType = mConnectingDataSource->getMIMEType();
         mLock.lock();
 
         if (err != OK) {
@@ -2239,8 +2249,6 @@
 
         mConnectingDataSource.clear();
 
-        String8 contentType = dataSource->getMIMEType();
-
         if (strncasecmp(contentType.string(), "audio/", 6)) {
             // We're not doing this for streams that appear to be audio-only
             // streams to ensure that even low bandwidth streams start
@@ -2317,7 +2325,8 @@
             }
         }
     } else {
-        dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
+        dataSource = DataSource::CreateFromURI(
+                mHTTPService, mUri.string(), &mUriHeaders);
     }
 
     if (dataSource == NULL) {
@@ -2759,7 +2768,7 @@
         {
             Mutex::Autolock autoLock(mLock);
             if (mTextDriver == NULL) {
-                mTextDriver = new TimedTextDriver(mListener);
+                mTextDriver = new TimedTextDriver(mListener, mHTTPService);
             }
             // String values written in Parcel are UTF-16 values.
             String8 uri(request.readString16());
@@ -2771,7 +2780,7 @@
         {
             Mutex::Autolock autoLock(mLock);
             if (mTextDriver == NULL) {
-                mTextDriver = new TimedTextDriver(mListener);
+                mTextDriver = new TimedTextDriver(mListener, mHTTPService);
             }
             int fd         = request.readFileDescriptor();
             off64_t offset = request.readInt64();
@@ -2812,7 +2821,7 @@
 
     fprintf(out, " AwesomePlayer\n");
     if (mStats.mFd < 0) {
-        fprintf(out, "  URI(suppressed)");
+        fprintf(out, "  URI(%s)", uriDebugString(mUri, mFlags & INCOGNITO).c_str());
     } else {
         fprintf(out, "  fd(%d)", mStats.mFd);
     }
@@ -2901,6 +2910,8 @@
     // get current position so we can start recreated stream from here
     getPosition(&mAudioTearDownPosition);
 
+    sp<IMediaHTTPService> savedHTTPService = mHTTPService;
+
     // Reset and recreate
     reset_l();
 
@@ -2910,7 +2921,7 @@
         mFileSource = fileSource;
         err = setDataSource_l(fileSource);
     } else {
-        err = setDataSource_l(uri, &uriHeaders);
+        err = setDataSource_l(savedHTTPService, uri, &uriHeaders);
     }
 
     mFlags |= PREPARING;
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 5b41f30..b31e9e8 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -31,6 +31,12 @@
 #include <utils/String8.h>
 #include <cutils/properties.h>
 
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
 namespace android {
 
 static const int64_t CAMERA_SOURCE_TIMEOUT_NS = 3000000000LL;
@@ -63,6 +69,9 @@
 }
 
 void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) {
+    UNUSED_UNLESS_VERBOSE(msgType);
+    UNUSED_UNLESS_VERBOSE(ext1);
+    UNUSED_UNLESS_VERBOSE(ext2);
     ALOGV("notify(%d, %d, %d)", msgType, ext1, ext2);
 }
 
@@ -577,14 +586,15 @@
     }
 }
 
-void CameraSource::startCameraRecording() {
+status_t CameraSource::startCameraRecording() {
     ALOGV("startCameraRecording");
     // Reset the identity to the current thread because media server owns the
     // camera and recording is started by the applications. The applications
     // will connect to the camera in ICameraRecordingProxy::startRecording.
     int64_t token = IPCThreadState::self()->clearCallingIdentity();
+    status_t err;
     if (mNumInputBuffers > 0) {
-        status_t err = mCamera->sendCommand(
+        err = mCamera->sendCommand(
             CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);
 
         // This could happen for CameraHAL1 clients; thus the failure is
@@ -595,17 +605,25 @@
         }
     }
 
+    err = OK;
     if (mCameraFlags & FLAGS_HOT_CAMERA) {
         mCamera->unlock();
         mCamera.clear();
-        CHECK_EQ((status_t)OK,
-            mCameraRecordingProxy->startRecording(new ProxyListener(this)));
+        if ((err = mCameraRecordingProxy->startRecording(
+                new ProxyListener(this))) != OK) {
+            ALOGE("Failed to start recording, received error: %s (%d)",
+                    strerror(-err), err);
+        }
     } else {
         mCamera->setListener(new CameraSourceListener(this));
         mCamera->startRecording();
-        CHECK(mCamera->recordingEnabled());
+        if (!mCamera->recordingEnabled()) {
+            err = -EINVAL;
+            ALOGE("Failed to start recording");
+        }
     }
     IPCThreadState::self()->restoreCallingIdentity(token);
+    return err;
 }
 
 status_t CameraSource::start(MetaData *meta) {
@@ -637,10 +655,12 @@
         }
     }
 
-    startCameraRecording();
+    status_t err;
+    if ((err = startCameraRecording()) == OK) {
+        mStarted = true;
+    }
 
-    mStarted = true;
-    return OK;
+    return err;
 }
 
 void CameraSource::stopCameraRecording() {
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 591daac..15ba967 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -85,7 +85,8 @@
     mVideoWidth = videoSize.width;
     mVideoHeight = videoSize.height;
 
-    if (!trySettingVideoSize(videoSize.width, videoSize.height)) {
+    if (OK == mInitCheck && !trySettingVideoSize(videoSize.width, videoSize.height)) {
+        releaseCamera();
         mInitCheck = NO_INIT;
     }
 
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 97987e2..6e0f37a 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -16,10 +16,6 @@
 
 #include "include/AMRExtractor.h"
 
-#if CHROMIUM_AVAILABLE
-#include "include/chromium_http_stub.h"
-#endif
-
 #include "include/AACExtractor.h"
 #include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
@@ -35,10 +31,14 @@
 
 #include "matroska/MatroskaExtractor.h"
 
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataURISource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaHTTP.h>
 #include <utils/String8.h>
 
 #include <cutils/properties.h>
@@ -180,7 +180,9 @@
 
 // static
 sp<DataSource> DataSource::CreateFromURI(
-        const char *uri, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *uri,
+        const KeyedVector<String8, String8> *headers) {
     bool isWidevine = !strncasecmp("widevine://", uri, 11);
 
     sp<DataSource> source;
@@ -189,7 +191,7 @@
     } else if (!strncasecmp("http://", uri, 7)
             || !strncasecmp("https://", uri, 8)
             || isWidevine) {
-        sp<HTTPBase> httpSource = HTTPBase::Create();
+        sp<HTTPBase> httpSource = new MediaHTTP(httpService->makeHTTPConnection());
 
         String8 tmp;
         if (isWidevine) {
@@ -220,11 +222,8 @@
             // in the widevine:// case.
             source = httpSource;
         }
-
-# if CHROMIUM_AVAILABLE
     } else if (!strncasecmp("data:", uri, 5)) {
-        source = createDataUriSource(uri);
-#endif
+        source = DataURISource::Create(uri);
     } else {
         // Assume it's a filename.
         source = new FileSource(uri);
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
new file mode 100644
index 0000000..377bc85
--- /dev/null
+++ b/media/libstagefright/DataURISource.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/DataURISource.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+
+namespace android {
+
+// static
+sp<DataURISource> DataURISource::Create(const char *uri) {
+    if (strncasecmp("data:", uri, 5)) {
+        return NULL;
+    }
+
+    char *commaPos = strrchr(uri, ',');
+
+    if (commaPos == NULL) {
+        return NULL;
+    }
+
+    sp<ABuffer> buffer;
+
+    AString tmp(&uri[5], commaPos - &uri[5]);
+
+    if (tmp.endsWith(";base64")) {
+        AString encoded(commaPos + 1);
+
+        // Strip CR and LF...
+        for (size_t i = encoded.size(); i-- > 0;) {
+            if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
+                encoded.erase(i, 1);
+            }
+        }
+
+        buffer = decodeBase64(encoded);
+
+        if (buffer == NULL) {
+            ALOGE("Malformed base64 encoded content found.");
+            return NULL;
+        }
+    } else {
+#if 0
+        size_t dataLen = strlen(uri) - tmp.size() - 6;
+        buffer = new ABuffer(dataLen);
+        memcpy(buffer->data(), commaPos + 1, dataLen);
+
+        // unescape
+#else
+        // MediaPlayer doesn't care for this right now as we don't
+        // play any text-based media.
+        return NULL;
+#endif
+    }
+
+    // We don't really care about charset or mime type.
+
+    return new DataURISource(buffer);
+}
+
+DataURISource::DataURISource(const sp<ABuffer> &buffer)
+    : mBuffer(buffer) {
+}
+
+DataURISource::~DataURISource() {
+}
+
+status_t DataURISource::initCheck() const {
+    return OK;
+}
+
+ssize_t DataURISource::readAt(off64_t offset, void *data, size_t size) {
+    if (offset >= mBuffer->size()) {
+        return 0;
+    }
+
+    size_t copy = mBuffer->size() - offset;
+    if (copy > size) {
+        copy = size;
+    }
+
+    memcpy(data, mBuffer->data() + offset, copy);
+
+    return copy;
+}
+
+status_t DataURISource::getSize(off64_t *size) {
+    *size = mBuffer->size();
+
+    return OK;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index 5fa4b6f..ca68c3d 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -20,10 +20,6 @@
 
 #include "include/HTTPBase.h"
 
-#if CHROMIUM_AVAILABLE
-#include "include/chromium_http_stub.h"
-#endif
-
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 
@@ -40,34 +36,7 @@
       mTotalTransferBytes(0),
       mPrevBandwidthMeasureTimeUs(0),
       mPrevEstimatedBandWidthKbps(0),
-      mBandWidthCollectFreqMs(5000),
-      mUIDValid(false),
-      mUID(0) {
-}
-
-// static
-sp<HTTPBase> HTTPBase::Create(uint32_t flags) {
-#if CHROMIUM_AVAILABLE
-        HTTPBase *dataSource = createChromiumHTTPDataSource(flags);
-        if (dataSource) {
-           return dataSource;
-        }
-#endif
-    {
-        TRESPASS();
-
-        return NULL;
-    }
-}
-
-// static
-status_t HTTPBase::UpdateProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-#if CHROMIUM_AVAILABLE
-    return UpdateChromiumHTTPDataSourceProxyConfig(host, port, exclusionList);
-#else
-    return INVALID_OPERATION;
-#endif
+      mBandWidthCollectFreqMs(5000) {
 }
 
 void HTTPBase::addBandwidthMeasurement(
@@ -135,21 +104,6 @@
     return OK;
 }
 
-void HTTPBase::setUID(uid_t uid) {
-    mUIDValid = true;
-    mUID = uid;
-}
-
-bool HTTPBase::getUID(uid_t *uid) const {
-    if (!mUIDValid) {
-        return false;
-    }
-
-    *uid = mUID;
-
-    return true;
-}
-
 // static
 void HTTPBase::RegisterSocketUserTag(int sockfd, uid_t uid, uint32_t kTag) {
     int res = qtaguid_tagSocket(sockfd, kTag, uid);
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 362cd6b..2a3fa04 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -488,12 +488,12 @@
             break;
         }
         uint32_t chunk_type = ntohl(hdr[1]);
-        if (chunk_type == FOURCC('s', 'i', 'd', 'x')) {
-            // parse the sidx box too
-            continue;
-        } else if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+        if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
             // store the offset of the first segment
             mMoofOffset = offset;
+        } else if (chunk_type != FOURCC('m', 'd', 'a', 't')) {
+            // keep parsing until we get to the data
+            continue;
         }
         break;
     }
@@ -913,6 +913,8 @@
 
         case FOURCC('e', 'l', 's', 't'):
         {
+            *offset += chunk_size;
+
             // See 14496-12 8.6.6
             uint8_t version;
             if (mDataSource->readAt(data_offset, &version, 1) < 1) {
@@ -975,12 +977,13 @@
                     mLastTrack->meta->setInt32(kKeyEncoderPadding, paddingsamples);
                 }
             }
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('f', 'r', 'm', 'a'):
         {
+            *offset += chunk_size;
+
             uint32_t original_fourcc;
             if (mDataSource->readAt(data_offset, &original_fourcc, 4) < 4) {
                 return ERROR_IO;
@@ -994,12 +997,13 @@
                 mLastTrack->meta->setInt32(kKeyChannelCount, num_channels);
                 mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
             }
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('t', 'e', 'n', 'c'):
         {
+            *offset += chunk_size;
+
             if (chunk_size < 32) {
                 return ERROR_MALFORMED;
             }
@@ -1044,23 +1048,25 @@
             mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId);
             mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
             mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('t', 'k', 'h', 'd'):
         {
+            *offset += chunk_size;
+
             status_t err;
             if ((err = parseTrackHeader(data_offset, chunk_data_size)) != OK) {
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('p', 's', 's', 'h'):
         {
+            *offset += chunk_size;
+
             PsshInfo pssh;
 
             if (mDataSource->readAt(data_offset + 4, &pssh.uuid, 16) < 16) {
@@ -1086,12 +1092,13 @@
             }
             mPssh.push_back(pssh);
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('m', 'd', 'h', 'd'):
         {
+            *offset += chunk_size;
+
             if (chunk_data_size < 4) {
                 return ERROR_MALFORMED;
             }
@@ -1172,7 +1179,6 @@
             mLastTrack->meta->setCString(
                     kKeyMediaLanguage, lang_code);
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1339,11 +1345,12 @@
                 mLastTrack->sampleTable->setChunkOffsetParams(
                         chunk_type, data_offset, chunk_data_size);
 
+            *offset += chunk_size;
+
             if (err != OK) {
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1353,11 +1360,12 @@
                 mLastTrack->sampleTable->setSampleToChunkParams(
                         data_offset, chunk_data_size);
 
+            *offset += chunk_size;
+
             if (err != OK) {
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1368,6 +1376,8 @@
                 mLastTrack->sampleTable->setSampleSizeParams(
                         chunk_type, data_offset, chunk_data_size);
 
+            *offset += chunk_size;
+
             if (err != OK) {
                 return err;
             }
@@ -1408,7 +1418,6 @@
                 }
                 mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size);
             }
-            *offset += chunk_size;
 
             // NOTE: setting another piece of metadata invalidates any pointers (such as the
             // mimetype) previously obtained, so don't cache them.
@@ -1432,6 +1441,8 @@
 
         case FOURCC('s', 't', 't', 's'):
         {
+            *offset += chunk_size;
+
             status_t err =
                 mLastTrack->sampleTable->setTimeToSampleParams(
                         data_offset, chunk_data_size);
@@ -1440,12 +1451,13 @@
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('c', 't', 't', 's'):
         {
+            *offset += chunk_size;
+
             status_t err =
                 mLastTrack->sampleTable->setCompositionTimeToSampleParams(
                         data_offset, chunk_data_size);
@@ -1454,12 +1466,13 @@
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('s', 't', 's', 's'):
         {
+            *offset += chunk_size;
+
             status_t err =
                 mLastTrack->sampleTable->setSyncSampleParams(
                         data_offset, chunk_data_size);
@@ -1468,13 +1481,14 @@
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
         // @xyz
         case FOURCC('\xA9', 'x', 'y', 'z'):
         {
+            *offset += chunk_size;
+
             // Best case the total data length inside "@xyz" box
             // would be 8, for instance "@xyz" + "\x00\x04\x15\xc7" + "0+0/",
             // where "\x00\x04" is the text string length with value = 4,
@@ -1503,12 +1517,13 @@
 
             buffer[location_length] = '\0';
             mFileMetaData->setCString(kKeyLocation, buffer);
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('e', 's', 'd', 's'):
         {
+            *offset += chunk_size;
+
             if (chunk_data_size < 4) {
                 return ERROR_MALFORMED;
             }
@@ -1546,12 +1561,13 @@
                 }
             }
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('a', 'v', 'c', 'C'):
         {
+            *offset += chunk_size;
+
             sp<ABuffer> buffer = new ABuffer(chunk_data_size);
 
             if (mDataSource->readAt(
@@ -1562,12 +1578,12 @@
             mLastTrack->meta->setData(
                     kKeyAVCC, kTypeAVCC, buffer->data(), chunk_data_size);
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('d', '2', '6', '3'):
         {
+            *offset += chunk_size;
             /*
              * d263 contains a fixed 7 bytes part:
              *   vendor - 4 bytes
@@ -1593,7 +1609,6 @@
 
             mLastTrack->meta->setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1601,11 +1616,13 @@
         {
             uint8_t buffer[4];
             if (chunk_data_size < (off64_t)sizeof(buffer)) {
+                *offset += chunk_size;
                 return ERROR_MALFORMED;
             }
 
             if (mDataSource->readAt(
                         data_offset, buffer, 4) < 4) {
+                *offset += chunk_size;
                 return ERROR_IO;
             }
 
@@ -1639,6 +1656,8 @@
         case FOURCC('n', 'a', 'm', 'e'):
         case FOURCC('d', 'a', 't', 'a'):
         {
+            *offset += chunk_size;
+
             if (mPath.size() == 6 && underMetaDataPath(mPath)) {
                 status_t err = parseITunesMetaData(data_offset, chunk_data_size);
 
@@ -1647,12 +1666,13 @@
                 }
             }
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('m', 'v', 'h', 'd'):
         {
+            *offset += chunk_size;
+
             if (chunk_data_size < 24) {
                 return ERROR_MALFORMED;
             }
@@ -1680,7 +1700,6 @@
 
             mFileMetaData->setCString(kKeyDate, s.string());
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1701,6 +1720,8 @@
 
         case FOURCC('h', 'd', 'l', 'r'):
         {
+            *offset += chunk_size;
+
             uint32_t buffer;
             if (mDataSource->readAt(
                         data_offset + 8, &buffer, 4) < 4) {
@@ -1715,7 +1736,6 @@
                 mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_TEXT_3GPP);
             }
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1740,6 +1760,8 @@
                 delete[] buffer;
                 buffer = NULL;
 
+                // advance read pointer so we don't end up reading this again
+                *offset += chunk_size;
                 return ERROR_IO;
             }
 
@@ -1754,6 +1776,8 @@
 
         case FOURCC('c', 'o', 'v', 'r'):
         {
+            *offset += chunk_size;
+
             if (mFileMetaData != NULL) {
                 ALOGV("chunk_data_size = %lld and data_offset = %lld",
                         chunk_data_size, data_offset);
@@ -1768,7 +1792,6 @@
                     buffer->data() + kSkipBytesOfDataBox, chunk_data_size - kSkipBytesOfDataBox);
             }
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1779,25 +1802,27 @@
         case FOURCC('a', 'l', 'b', 'm'):
         case FOURCC('y', 'r', 'r', 'c'):
         {
+            *offset += chunk_size;
+
             status_t err = parse3GPPMetaData(data_offset, chunk_data_size, depth);
 
             if (err != OK) {
                 return err;
             }
 
-            *offset += chunk_size;
             break;
         }
 
         case FOURCC('I', 'D', '3', '2'):
         {
+            *offset += chunk_size;
+
             if (chunk_data_size < 6) {
                 return ERROR_MALFORMED;
             }
 
             parseID3v2MetaData(data_offset + 6);
 
-            *offset += chunk_size;
             break;
         }
 
@@ -1921,9 +1946,10 @@
             ALOGW("sub-sidx boxes not supported yet");
         }
         bool sap = d3 & 0x80000000;
-        bool saptype = d3 >> 28;
-        if (!sap || saptype > 2) {
-            ALOGW("not a stream access point, or unsupported type");
+        uint32_t saptype = (d3 >> 28) & 7;
+        if (!sap || (saptype != 1 && saptype != 2)) {
+            // type 1 and 2 are sync samples
+            ALOGW("not a stream access point, or unsupported type: %08x", d3);
         }
         total_duration += d2;
         offset += 12;
@@ -2442,6 +2468,58 @@
     return OK;
 }
 
+typedef enum {
+    //AOT_NONE             = -1,
+    //AOT_NULL_OBJECT      = 0,
+    //AOT_AAC_MAIN         = 1, /**< Main profile                              */
+    AOT_AAC_LC           = 2,   /**< Low Complexity object                     */
+    //AOT_AAC_SSR          = 3,
+    //AOT_AAC_LTP          = 4,
+    AOT_SBR              = 5,
+    //AOT_AAC_SCAL         = 6,
+    //AOT_TWIN_VQ          = 7,
+    //AOT_CELP             = 8,
+    //AOT_HVXC             = 9,
+    //AOT_RSVD_10          = 10, /**< (reserved)                                */
+    //AOT_RSVD_11          = 11, /**< (reserved)                                */
+    //AOT_TTSI             = 12, /**< TTSI Object                               */
+    //AOT_MAIN_SYNTH       = 13, /**< Main Synthetic object                     */
+    //AOT_WAV_TAB_SYNTH    = 14, /**< Wavetable Synthesis object                */
+    //AOT_GEN_MIDI         = 15, /**< General MIDI object                       */
+    //AOT_ALG_SYNTH_AUD_FX = 16, /**< Algorithmic Synthesis and Audio FX object */
+    AOT_ER_AAC_LC        = 17,   /**< Error Resilient(ER) AAC Low Complexity    */
+    //AOT_RSVD_18          = 18, /**< (reserved)                                */
+    //AOT_ER_AAC_LTP       = 19, /**< Error Resilient(ER) AAC LTP object        */
+    AOT_ER_AAC_SCAL      = 20,   /**< Error Resilient(ER) AAC Scalable object   */
+    //AOT_ER_TWIN_VQ       = 21, /**< Error Resilient(ER) TwinVQ object         */
+    AOT_ER_BSAC          = 22,   /**< Error Resilient(ER) BSAC object           */
+    AOT_ER_AAC_LD        = 23,   /**< Error Resilient(ER) AAC LowDelay object   */
+    //AOT_ER_CELP          = 24, /**< Error Resilient(ER) CELP object           */
+    //AOT_ER_HVXC          = 25, /**< Error Resilient(ER) HVXC object           */
+    //AOT_ER_HILN          = 26, /**< Error Resilient(ER) HILN object           */
+    //AOT_ER_PARA          = 27, /**< Error Resilient(ER) Parametric object     */
+    //AOT_RSVD_28          = 28, /**< might become SSC                          */
+    AOT_PS               = 29,   /**< PS, Parametric Stereo (includes SBR)      */
+    //AOT_MPEGS            = 30, /**< MPEG Surround                             */
+
+    AOT_ESCAPE           = 31,   /**< Signal AOT uses more than 5 bits          */
+
+    //AOT_MP3ONMP4_L1      = 32, /**< MPEG-Layer1 in mp4                        */
+    //AOT_MP3ONMP4_L2      = 33, /**< MPEG-Layer2 in mp4                        */
+    //AOT_MP3ONMP4_L3      = 34, /**< MPEG-Layer3 in mp4                        */
+    //AOT_RSVD_35          = 35, /**< might become DST                          */
+    //AOT_RSVD_36          = 36, /**< might become ALS                          */
+    //AOT_AAC_SLS          = 37, /**< AAC + SLS                                 */
+    //AOT_SLS              = 38, /**< SLS                                       */
+    //AOT_ER_AAC_ELD       = 39, /**< AAC Enhanced Low Delay                    */
+
+    //AOT_USAC             = 42, /**< USAC                                      */
+    //AOT_SAOC             = 43, /**< SAOC                                      */
+    //AOT_LD_MPEGS         = 44, /**< Low Delay MPEG Surround                   */
+
+    //AOT_RSVD50           = 50,  /**< Interim AOT for Rsvd50                   */
+} AUDIO_OBJECT_TYPE;
+
 status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
         const void *esds_data, size_t esds_size) {
     ESDS esds(esds_data, esds_size);
@@ -2524,7 +2602,7 @@
         sampleRate = kSamplingRate[freqIndex];
     }
 
-    if (objectType == 5 || objectType == 29) { // SBR specific config per 14496-3 table 1.13
+    if (objectType == AOT_SBR || objectType == AOT_PS) {//SBR specific config per 14496-3 table 1.13
         uint32_t extFreqIndex = br.getBits(4);
         int32_t extSampleRate;
         if (extFreqIndex == 15) {
@@ -2542,6 +2620,111 @@
         //      mLastTrack->meta->setInt32(kKeyExtSampleRate, extSampleRate);
     }
 
+    switch (numChannels) {
+        // values defined in 14496-3_2009 amendment-4 Table 1.19 - Channel Configuration
+        case 0:
+        case 1:// FC
+        case 2:// FL FR
+        case 3:// FC, FL FR
+        case 4:// FC, FL FR, RC
+        case 5:// FC, FL FR, SL SR
+        case 6:// FC, FL FR, SL SR, LFE
+            //numChannels already contains the right value
+            break;
+        case 11:// FC, FL FR, SL SR, RC, LFE
+            numChannels = 7;
+            break;
+        case 7: // FC, FCL FCR, FL FR, SL SR, LFE
+        case 12:// FC, FL  FR,  SL SR, RL RR, LFE
+        case 14:// FC, FL  FR,  SL SR, LFE, FHL FHR
+            numChannels = 8;
+            break;
+        default:
+            return ERROR_UNSUPPORTED;
+    }
+
+    {
+        if (objectType == AOT_SBR || objectType == AOT_PS) {
+            const int32_t extensionSamplingFrequency = br.getBits(4);
+            objectType = br.getBits(5);
+
+            if (objectType == AOT_ESCAPE) {
+                objectType = 32 + br.getBits(6);
+            }
+        }
+        if (objectType == AOT_AAC_LC || objectType == AOT_ER_AAC_LC ||
+                objectType == AOT_ER_AAC_LD || objectType == AOT_ER_AAC_SCAL ||
+                objectType == AOT_ER_BSAC) {
+            const int32_t frameLengthFlag = br.getBits(1);
+
+            const int32_t dependsOnCoreCoder = br.getBits(1);
+
+            if (dependsOnCoreCoder ) {
+                const int32_t coreCoderDelay = br.getBits(14);
+            }
+
+            const int32_t extensionFlag = br.getBits(1);
+
+            if (numChannels == 0 ) {
+                int32_t channelsEffectiveNum = 0;
+                int32_t channelsNum = 0;
+                const int32_t ElementInstanceTag = br.getBits(4);
+                const int32_t Profile = br.getBits(2);
+                const int32_t SamplingFrequencyIndex = br.getBits(4);
+                const int32_t NumFrontChannelElements = br.getBits(4);
+                const int32_t NumSideChannelElements = br.getBits(4);
+                const int32_t NumBackChannelElements = br.getBits(4);
+                const int32_t NumLfeChannelElements = br.getBits(2);
+                const int32_t NumAssocDataElements = br.getBits(3);
+                const int32_t NumValidCcElements = br.getBits(4);
+
+                const int32_t MonoMixdownPresent = br.getBits(1);
+                if (MonoMixdownPresent != 0) {
+                    const int32_t MonoMixdownElementNumber = br.getBits(4);
+                }
+
+                const int32_t StereoMixdownPresent = br.getBits(1);
+                if (StereoMixdownPresent != 0) {
+                    const int32_t StereoMixdownElementNumber = br.getBits(4);
+                }
+
+                const int32_t MatrixMixdownIndexPresent = br.getBits(1);
+                if (MatrixMixdownIndexPresent != 0) {
+                    const int32_t MatrixMixdownIndex = br.getBits(2);
+                    const int32_t PseudoSurroundEnable = br.getBits(1);
+                }
+
+                int i;
+                for (i=0; i < NumFrontChannelElements; i++) {
+                    const int32_t FrontElementIsCpe = br.getBits(1);
+                    const int32_t FrontElementTagSelect = br.getBits(4);
+                    channelsNum += FrontElementIsCpe ? 2 : 1;
+                }
+
+                for (i=0; i < NumSideChannelElements; i++) {
+                    const int32_t SideElementIsCpe = br.getBits(1);
+                    const int32_t SideElementTagSelect = br.getBits(4);
+                    channelsNum += SideElementIsCpe ? 2 : 1;
+                }
+
+                for (i=0; i < NumBackChannelElements; i++) {
+                    const int32_t BackElementIsCpe = br.getBits(1);
+                    const int32_t BackElementTagSelect = br.getBits(4);
+                    channelsNum += BackElementIsCpe ? 2 : 1;
+                }
+                channelsEffectiveNum = channelsNum;
+
+                for (i=0; i < NumLfeChannelElements; i++) {
+                    const int32_t LfeElementTagSelect = br.getBits(4);
+                    channelsNum += 1;
+                }
+                ALOGV("mpeg4 audio channelsNum = %d", channelsNum);
+                ALOGV("mpeg4 audio channelsEffectiveNum = %d", channelsEffectiveNum);
+                numChannels = channelsNum;
+            }
+        }
+    }
+
     if (numChannels == 0) {
         return ERROR_UNSUPPORTED;
     }
@@ -2742,9 +2925,20 @@
                 }
             }
             if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
-                // *offset points to the mdat box following this moof
-                parseChunk(offset); // doesn't actually parse it, just updates offset
-                mNextMoofOffset = *offset;
+                // *offset points to the box following this moof. Find the next moof from there.
+
+                while (true) {
+                    if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+                        return ERROR_END_OF_STREAM;
+                    }
+                    chunk_size = ntohl(hdr[0]);
+                    chunk_type = ntohl(hdr[1]);
+                    if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+                        mNextMoofOffset = *offset;
+                        break;
+                    }
+                    *offset += chunk_size;
+                }
             }
             break;
         }
@@ -3549,7 +3743,7 @@
                 const SidxEntry *se = &mSegments[i];
                 if (totalTime + se->mDurationUs > seekTimeUs) {
                     // The requested time is somewhere in this segment
-                    if ((mode == ReadOptions::SEEK_NEXT_SYNC) ||
+                    if ((mode == ReadOptions::SEEK_NEXT_SYNC && seekTimeUs > totalTime) ||
                         (mode == ReadOptions::SEEK_CLOSEST_SYNC &&
                         (seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) {
                         // requested next sync, or closest sync and it was closer to the end of
@@ -3562,11 +3756,19 @@
                 totalTime += se->mDurationUs;
                 totalOffset += se->mSize;
             }
-        mCurrentMoofOffset = totalOffset;
-        mCurrentSamples.clear();
-        mCurrentSampleIndex = 0;
-        parseChunk(&totalOffset);
-        mCurrentTime = totalTime * mTimescale / 1000000ll;
+            mCurrentMoofOffset = totalOffset;
+            mCurrentSamples.clear();
+            mCurrentSampleIndex = 0;
+            parseChunk(&totalOffset);
+            mCurrentTime = totalTime * mTimescale / 1000000ll;
+        } else {
+            // without sidx boxes, we can only seek to 0
+            mCurrentMoofOffset = mFirstMoofOffset;
+            mCurrentSamples.clear();
+            mCurrentSampleIndex = 0;
+            off64_t tmp = mCurrentMoofOffset;
+            parseChunk(&tmp);
+            mCurrentTime = 0;
         }
 
         if (mBuffer != NULL) {
@@ -3578,7 +3780,7 @@
     }
 
     off64_t offset = 0;
-    size_t size;
+    size_t size = 0;
     uint32_t cts = 0;
     bool isSyncSample = false;
     bool newBuffer = false;
@@ -3586,16 +3788,18 @@
         newBuffer = true;
 
         if (mCurrentSampleIndex >= mCurrentSamples.size()) {
-            // move to next fragment
-            Sample lastSample = mCurrentSamples[mCurrentSamples.size() - 1];
-            off64_t nextMoof = mNextMoofOffset; // lastSample.offset + lastSample.size;
+            // move to next fragment if there is one
+            if (mNextMoofOffset <= mCurrentMoofOffset) {
+                return ERROR_END_OF_STREAM;
+            }
+            off64_t nextMoof = mNextMoofOffset;
             mCurrentMoofOffset = nextMoof;
             mCurrentSamples.clear();
             mCurrentSampleIndex = 0;
             parseChunk(&nextMoof);
-                if (mCurrentSampleIndex >= mCurrentSamples.size()) {
-                    return ERROR_END_OF_STREAM;
-                }
+            if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+                return ERROR_END_OF_STREAM;
+            }
         }
 
         const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 58a4487..24e53b3 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -41,6 +41,12 @@
 
 #include "include/ESDS.h"
 
+#define WARN_UNLESS(condition, message, ...) \
+( (CONDITION(condition)) ? false : ({ \
+    ALOGW("Condition %s failed "  message, #condition, ##__VA_ARGS__); \
+    true; \
+}))
+
 namespace android {
 
 static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
@@ -975,13 +981,16 @@
     if (param && param->findInt32(kKeyFileType, &fileType) &&
         fileType != OUTPUT_FORMAT_MPEG_4) {
         writeFourcc("3gp4");
-    } else {
+        writeInt32(0);
         writeFourcc("isom");
+        writeFourcc("3gp4");
+    } else {
+        writeFourcc("mp42");
+        writeInt32(0);
+        writeFourcc("isom");
+        writeFourcc("mp42");
     }
 
-    writeInt32(0);
-    writeFourcc("isom");
-    writeFourcc("3gp4");
     endBox();
 }
 
@@ -1763,7 +1772,7 @@
 }
 
 status_t MPEG4Writer::Track::stop() {
-    ALOGD("Stopping %s track", mIsAudio? "Audio": "Video");
+    ALOGD("%s track stopping", mIsAudio? "Audio": "Video");
     if (!mStarted) {
         ALOGE("Stop() called but track is not started");
         return ERROR_END_OF_STREAM;
@@ -1774,19 +1783,14 @@
     }
     mDone = true;
 
+    ALOGD("%s track source stopping", mIsAudio? "Audio": "Video");
+    mSource->stop();
+    ALOGD("%s track source stopped", mIsAudio? "Audio": "Video");
+
     void *dummy;
     pthread_join(mThread, &dummy);
-
     status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
 
-    ALOGD("Stopping %s track source", mIsAudio? "Audio": "Video");
-    {
-        status_t status = mSource->stop();
-        if (err == OK && status != OK && status != ERROR_END_OF_STREAM) {
-            err = status;
-        }
-    }
-
     ALOGD("%s track stopped", mIsAudio? "Audio": "Video");
     return err;
 }
@@ -2100,6 +2104,7 @@
 
     status_t err = OK;
     MediaBuffer *buffer;
+    const char *trackName = mIsAudio ? "Audio" : "Video";
     while (!mDone && (err = mSource->read(&buffer)) == OK) {
         if (buffer->range_length() == 0) {
             buffer->release();
@@ -2195,15 +2200,27 @@
 
         if (mResumed) {
             int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
-            CHECK_GE(durExcludingEarlierPausesUs, 0ll);
+            if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+                copy->release();
+                return ERROR_MALFORMED;
+            }
+
             int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
-            CHECK_GE(pausedDurationUs, lastDurationUs);
+            if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+                copy->release();
+                return ERROR_MALFORMED;
+            }
+
             previousPausedDurationUs += pausedDurationUs - lastDurationUs;
             mResumed = false;
         }
 
         timestampUs -= previousPausedDurationUs;
-        CHECK_GE(timestampUs, 0ll);
+        if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+            copy->release();
+            return ERROR_MALFORMED;
+        }
+
         if (!mIsAudio) {
             /*
              * Composition time: timestampUs
@@ -2215,7 +2232,11 @@
             decodingTimeUs -= previousPausedDurationUs;
             cttsOffsetTimeUs =
                     timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
-            CHECK_GE(cttsOffsetTimeUs, 0ll);
+            if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+                copy->release();
+                return ERROR_MALFORMED;
+            }
+
             timestampUs = decodingTimeUs;
             ALOGV("decoding time: %lld and ctts offset time: %lld",
                 timestampUs, cttsOffsetTimeUs);
@@ -2223,7 +2244,11 @@
             // Update ctts box table if necessary
             currCttsOffsetTimeTicks =
                     (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
-            CHECK_LE(currCttsOffsetTimeTicks, 0x0FFFFFFFFLL);
+            if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+                copy->release();
+                return ERROR_MALFORMED;
+            }
+
             if (mStszTableEntries->count() == 0) {
                 // Force the first ctts table entry to have one single entry
                 // so that we can do adjustment for the initial track start
@@ -2261,9 +2286,13 @@
             }
         }
 
-        CHECK_GE(timestampUs, 0ll);
+        if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+            copy->release();
+            return ERROR_MALFORMED;
+        }
+
         ALOGV("%s media time stamp: %lld and previous paused duration %lld",
-                mIsAudio? "Audio": "Video", timestampUs, previousPausedDurationUs);
+                trackName, timestampUs, previousPausedDurationUs);
         if (timestampUs > mTrackDurationUs) {
             mTrackDurationUs = timestampUs;
         }
@@ -2278,10 +2307,27 @@
                 (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
         if (currDurationTicks < 0ll) {
             ALOGE("timestampUs %lld < lastTimestampUs %lld for %s track",
-                timestampUs, lastTimestampUs, mIsAudio? "Audio": "Video");
+                timestampUs, lastTimestampUs, trackName);
+            copy->release();
             return UNKNOWN_ERROR;
         }
 
+        // if the duration is different for this sample, see if it is close enough to the previous
+        // duration that we can fudge it and use the same value, to avoid filling the stts table
+        // with lots of near-identical entries.
+        // "close enough" here means that the current duration needs to be adjusted by less
+        // than 0.1 milliseconds
+        if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+            int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+                    + (mTimeScale / 2)) / mTimeScale;
+            if (deltaUs > -100 && deltaUs < 100) {
+                // use previous ticks, and adjust timestamp as if it was actually that number
+                // of ticks
+                currDurationTicks = lastDurationTicks;
+                timestampUs += deltaUs;
+            }
+        }
+
         mStszTableEntries->add(htonl(sampleSize));
         if (mStszTableEntries->count() > 2) {
 
@@ -2302,7 +2348,7 @@
             previousSampleSize = sampleSize;
         }
         ALOGV("%s timestampUs/lastTimestampUs: %lld/%lld",
-                mIsAudio? "Audio": "Video", timestampUs, lastTimestampUs);
+                trackName, timestampUs, lastTimestampUs);
         lastDurationUs = timestampUs - lastTimestampUs;
         lastDurationTicks = currDurationTicks;
         lastTimestampUs = timestampUs;
@@ -2407,7 +2453,7 @@
     sendTrackSummary(hasMultipleTracks);
 
     ALOGI("Received total/0-length (%d/%d) buffers and encoded %d frames. - %s",
-            count, nZeroLengthFrames, mStszTableEntries->count(), mIsAudio? "audio": "video");
+            count, nZeroLengthFrames, mStszTableEntries->count(), trackName);
     if (mIsAudio) {
         ALOGI("Audio track drift time: %lld us", mOwner->getDriftTimeUs());
     }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index fe21296..601dccf 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -352,6 +352,20 @@
     return OK;
 }
 
+status_t MediaCodec::getInputFormat(sp<AMessage> *format) const {
+    sp<AMessage> msg = new AMessage(kWhatGetInputFormat, id());
+
+    sp<AMessage> response;
+    status_t err;
+    if ((err = PostAndAwaitResponse(msg, &response)) != OK) {
+        return err;
+    }
+
+    CHECK(response->findMessage("format", format));
+
+    return OK;
+}
+
 status_t MediaCodec::getName(AString *name) const {
     sp<AMessage> msg = new AMessage(kWhatGetName, id());
 
@@ -589,6 +603,7 @@
                             postActivityNotificationIfPossible();
 
                             cancelPendingDequeueOperations();
+                            setState(UNINITIALIZED);
                             break;
                         }
 
@@ -598,6 +613,7 @@
 
                             mFlags |= kFlagStickyError;
                             postActivityNotificationIfPossible();
+                            setState(UNINITIALIZED);
                             break;
                         }
                     }
@@ -642,6 +658,9 @@
                     // reset input surface flag
                     mHaveInputSurface = false;
 
+                    CHECK(msg->findMessage("input-format", &mInputFormat));
+                    CHECK(msg->findMessage("output-format", &mOutputFormat));
+
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
@@ -1330,14 +1349,19 @@
             break;
         }
 
+        case kWhatGetInputFormat:
         case kWhatGetOutputFormat:
         {
+            sp<AMessage> format =
+                (msg->what() == kWhatGetOutputFormat ? mOutputFormat : mInputFormat);
+
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if ((mState != STARTED && mState != FLUSHING)
+            if ((mState != CONFIGURED && mState != STARTING &&
+                 mState != STARTED && mState != FLUSHING)
                     || (mFlags & kFlagStickyError)
-                    || mOutputFormat == NULL) {
+                    || format == NULL) {
                 sp<AMessage> response = new AMessage;
                 response->setInt32("err", INVALID_OPERATION);
 
@@ -1346,7 +1370,7 @@
             }
 
             sp<AMessage> response = new AMessage;
-            response->setMessage("format", mOutputFormat);
+            response->setMessage("format", format);
             response->postReply(replyID);
             break;
         }
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6248e90..8a451c8 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -48,22 +48,43 @@
 
 MediaCodecList::MediaCodecList()
     : mInitCheck(NO_INIT) {
-    FILE *file = fopen("/etc/media_codecs.xml", "r");
+    parseTopLevelXMLFile("/etc/media_codecs.xml");
+}
 
-    if (file == NULL) {
-        ALOGW("unable to open media codecs configuration xml file.");
+void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml) {
+    // get href_base
+    char *href_base_end = strrchr(codecs_xml, '/');
+    if (href_base_end != NULL) {
+        mHrefBase = AString(codecs_xml, href_base_end - codecs_xml + 1);
+    }
+
+    mInitCheck = OK;
+    mCurrentSection = SECTION_TOPLEVEL;
+    mDepth = 0;
+
+    parseXMLFile(codecs_xml);
+
+    if (mInitCheck != OK) {
+        mCodecInfos.clear();
+        mCodecQuirks.clear();
         return;
     }
 
-    parseXMLFile(file);
+    // These are currently still used by the video editing suite.
+    addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
+    addMediaCodec(
+            false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
 
-    if (mInitCheck == OK) {
-        // These are currently still used by the video editing suite.
+    for (size_t i = mCodecInfos.size(); i-- > 0;) {
+        CodecInfo *info = &mCodecInfos.editItemAt(i);
 
-        addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
+        if (info->mTypes == 0) {
+            // No types supported by this component???
+            ALOGW("Component %s does not support any type of media?",
+                  info->mName.c_str());
 
-        addMediaCodec(
-                false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
+            mCodecInfos.removeAt(i);
+        }
     }
 
 #if 0
@@ -84,9 +105,6 @@
         ALOGI("%s", line.c_str());
     }
 #endif
-
-    fclose(file);
-    file = NULL;
 }
 
 MediaCodecList::~MediaCodecList() {
@@ -96,10 +114,14 @@
     return mInitCheck;
 }
 
-void MediaCodecList::parseXMLFile(FILE *file) {
-    mInitCheck = OK;
-    mCurrentSection = SECTION_TOPLEVEL;
-    mDepth = 0;
+void MediaCodecList::parseXMLFile(const char *path) {
+    FILE *file = fopen(path, "r");
+
+    if (file == NULL) {
+        ALOGW("unable to open media codecs configuration xml file: %s", path);
+        mInitCheck = NAME_NOT_FOUND;
+        return;
+    }
 
     XML_Parser parser = ::XML_ParserCreate(NULL);
     CHECK(parser != NULL);
@@ -112,7 +134,7 @@
     while (mInitCheck == OK) {
         void *buff = ::XML_GetBuffer(parser, BUFF_SIZE);
         if (buff == NULL) {
-            ALOGE("failed to in call to XML_GetBuffer()");
+            ALOGE("failed in call to XML_GetBuffer()");
             mInitCheck = UNKNOWN_ERROR;
             break;
         }
@@ -124,8 +146,9 @@
             break;
         }
 
-        if (::XML_ParseBuffer(parser, bytes_read, bytes_read == 0)
-                != XML_STATUS_OK) {
+        XML_Status status = ::XML_ParseBuffer(parser, bytes_read, bytes_read == 0);
+        if (status != XML_STATUS_OK) {
+            ALOGE("malformed (%s)", ::XML_ErrorString(::XML_GetErrorCode(parser)));
             mInitCheck = ERROR_MALFORMED;
             break;
         }
@@ -137,25 +160,8 @@
 
     ::XML_ParserFree(parser);
 
-    if (mInitCheck == OK) {
-        for (size_t i = mCodecInfos.size(); i-- > 0;) {
-            CodecInfo *info = &mCodecInfos.editItemAt(i);
-
-            if (info->mTypes == 0) {
-                // No types supported by this component???
-
-                ALOGW("Component %s does not support any type of media?",
-                      info->mName.c_str());
-
-                mCodecInfos.removeAt(i);
-            }
-        }
-    }
-
-    if (mInitCheck != OK) {
-        mCodecInfos.clear();
-        mCodecQuirks.clear();
-    }
+    fclose(file);
+    file = NULL;
 }
 
 // static
@@ -169,12 +175,63 @@
     static_cast<MediaCodecList *>(me)->endElementHandler(name);
 }
 
+status_t MediaCodecList::includeXMLFile(const char **attrs) {
+    const char *href = NULL;
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "href")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            href = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    // For security reasons and for simplicity, file names can only contain
+    // [a-zA-Z0-9_.] and must start with media_codecs_ and end with .xml
+    for (i = 0; href[i] != '\0'; i++) {
+        if (href[i] == '.' || href[i] == '_' ||
+                (href[i] >= '0' && href[i] <= '9') ||
+                (href[i] >= 'A' && href[i] <= 'Z') ||
+                (href[i] >= 'a' && href[i] <= 'z')) {
+            continue;
+        }
+        ALOGE("invalid include file name: %s", href);
+        return -EINVAL;
+    }
+
+    AString filename = href;
+    if (!filename.startsWith("media_codecs_") ||
+        !filename.endsWith(".xml")) {
+        ALOGE("invalid include file name: %s", href);
+        return -EINVAL;
+    }
+    filename.insert(mHrefBase, 0);
+
+    parseXMLFile(filename.c_str());
+    return mInitCheck;
+}
+
 void MediaCodecList::startElementHandler(
         const char *name, const char **attrs) {
     if (mInitCheck != OK) {
         return;
     }
 
+    if (!strcmp(name, "Include")) {
+        mInitCheck = includeXMLFile(attrs);
+        if (mInitCheck == OK) {
+            mPastSections.push(mCurrentSection);
+            mCurrentSection = SECTION_INCLUDE;
+        }
+        ++mDepth;
+        return;
+    }
+
     switch (mCurrentSection) {
         case SECTION_TOPLEVEL:
         {
@@ -264,6 +321,15 @@
             break;
         }
 
+        case SECTION_INCLUDE:
+        {
+            if (!strcmp(name, "Include") && mPastSections.size() > 0) {
+                mCurrentSection = mPastSections.top();
+                mPastSections.pop();
+            }
+            break;
+        }
+
         default:
             break;
     }
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
new file mode 100644
index 0000000..924173c
--- /dev/null
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -0,0 +1,881 @@
+/*
+ * Copyright 2014, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecSource"
+#define DEBUG_DRIFT_TIME 0
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) {
+    void *mbuf;
+    if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
+            && mbuf != NULL) {
+        ALOGV("releasing mbuf %p", mbuf);
+
+        accessUnit->meta()->setPointer("mediaBuffer", NULL);
+
+        static_cast<MediaBuffer *>(mbuf)->release();
+        mbuf = NULL;
+    }
+}
+
+struct MediaCodecSource::Puller : public AHandler {
+    Puller(const sp<MediaSource> &source);
+
+    status_t start(const sp<MetaData> &meta, const sp<AMessage> &notify);
+    void stopAsync();
+
+    void pause();
+    void resume();
+
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+    virtual ~Puller();
+
+private:
+    enum {
+        kWhatStart = 'msta',
+        kWhatStop,
+        kWhatPull,
+        kWhatPause,
+        kWhatResume,
+    };
+
+    sp<MediaSource> mSource;
+    sp<AMessage> mNotify;
+    sp<ALooper> mLooper;
+    int32_t mPullGeneration;
+    bool mIsAudio;
+    bool mPaused;
+    bool mReachedEOS;
+
+    status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
+    void schedulePull();
+    void handleEOS();
+
+    DISALLOW_EVIL_CONSTRUCTORS(Puller);
+};
+
+MediaCodecSource::Puller::Puller(const sp<MediaSource> &source)
+    : mSource(source),
+      mLooper(new ALooper()),
+      mPullGeneration(0),
+      mIsAudio(false),
+      mPaused(false),
+      mReachedEOS(false) {
+    sp<MetaData> meta = source->getFormat();
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    mIsAudio = !strncasecmp(mime, "audio/", 6);
+
+    mLooper->setName("pull_looper");
+}
+
+MediaCodecSource::Puller::~Puller() {
+    mLooper->unregisterHandler(id());
+    mLooper->stop();
+}
+
+status_t MediaCodecSource::Puller::postSynchronouslyAndReturnError(
+        const sp<AMessage> &msg) {
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!response->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
+status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta,
+        const sp<AMessage> &notify) {
+    ALOGV("puller (%s) start", mIsAudio ? "audio" : "video");
+    mLooper->start(
+            false /* runOnCallingThread */,
+            false /* canCallJava */,
+            PRIORITY_AUDIO);
+    mLooper->registerHandler(this);
+    mNotify = notify;
+
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setObject("meta", meta);
+    return postSynchronouslyAndReturnError(msg);
+}
+
+void MediaCodecSource::Puller::stopAsync() {
+    ALOGV("puller (%s) stopAsync", mIsAudio ? "audio" : "video");
+    (new AMessage(kWhatStop, id()))->post();
+}
+
+void MediaCodecSource::Puller::pause() {
+    (new AMessage(kWhatPause, id()))->post();
+}
+
+void MediaCodecSource::Puller::resume() {
+    (new AMessage(kWhatResume, id()))->post();
+}
+
+void MediaCodecSource::Puller::schedulePull() {
+    sp<AMessage> msg = new AMessage(kWhatPull, id());
+    msg->setInt32("generation", mPullGeneration);
+    msg->post();
+}
+
+void MediaCodecSource::Puller::handleEOS() {
+    if (!mReachedEOS) {
+        ALOGV("puller (%s) posting EOS", mIsAudio ? "audio" : "video");
+        mReachedEOS = true;
+        sp<AMessage> notify = mNotify->dup();
+        notify->setPointer("accessUnit", NULL);
+        notify->post();
+    }
+}
+
+void MediaCodecSource::Puller::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStart:
+        {
+            sp<RefBase> obj;
+            CHECK(msg->findObject("meta", &obj));
+
+            mReachedEOS = false;
+
+            status_t err = mSource->start(static_cast<MetaData *>(obj.get()));
+
+            if (err == OK) {
+                schedulePull();
+            }
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatStop:
+        {
+            ALOGV("source (%s) stopping", mIsAudio ? "audio" : "video");
+            mSource->stop();
+            ALOGV("source (%s) stopped", mIsAudio ? "audio" : "video");
+            ++mPullGeneration;
+
+            handleEOS();
+            break;
+        }
+
+        case kWhatPull:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mPullGeneration) {
+                break;
+            }
+
+            MediaBuffer *mbuf;
+            status_t err = mSource->read(&mbuf);
+
+            if (mPaused) {
+                if (err == OK) {
+                    mbuf->release();
+                    mbuf = NULL;
+                }
+
+                msg->post();
+                break;
+            }
+
+            if (err != OK) {
+                if (err == ERROR_END_OF_STREAM) {
+                    ALOGV("stream ended, mbuf %p", mbuf);
+                } else {
+                    ALOGE("error %d reading stream.", err);
+                }
+                handleEOS();
+            } else {
+                sp<AMessage> notify = mNotify->dup();
+
+                notify->setPointer("accessUnit", mbuf);
+                notify->post();
+
+                msg->post();
+            }
+            break;
+        }
+
+        case kWhatPause:
+        {
+            mPaused = true;
+            break;
+        }
+
+        case kWhatResume:
+        {
+            mPaused = false;
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+// static
+sp<MediaCodecSource> MediaCodecSource::Create(
+        const sp<ALooper> &looper,
+        const sp<AMessage> &format,
+        const sp<MediaSource> &source,
+        uint32_t flags) {
+    sp<MediaCodecSource> mediaSource =
+            new MediaCodecSource(looper, format, source, flags);
+
+    if (mediaSource->init() == OK) {
+        return mediaSource;
+    }
+    return NULL;
+}
+
+status_t MediaCodecSource::start(MetaData* params) {
+    sp<AMessage> msg = new AMessage(kWhatStart, mReflector->id());
+    msg->setObject("meta", params);
+    return postSynchronouslyAndReturnError(msg);
+}
+
+status_t MediaCodecSource::stop() {
+    sp<AMessage> msg = new AMessage(kWhatStop, mReflector->id());
+    return postSynchronouslyAndReturnError(msg);
+}
+
+status_t MediaCodecSource::pause() {
+    (new AMessage(kWhatPause, mReflector->id()))->post();
+    return OK;
+}
+
+sp<IGraphicBufferProducer> MediaCodecSource::getGraphicBufferProducer() {
+    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+    return mGraphicBufferProducer;
+}
+
+status_t MediaCodecSource::read(
+        MediaBuffer** buffer, const ReadOptions* /* options */) {
+    Mutex::Autolock autolock(mOutputBufferLock);
+
+    *buffer = NULL;
+    while (mOutputBufferQueue.size() == 0 && !mEncodedReachedEOS) {
+        mOutputBufferCond.wait(mOutputBufferLock);
+    }
+    if (!mEncodedReachedEOS) {
+        *buffer = *mOutputBufferQueue.begin();
+        mOutputBufferQueue.erase(mOutputBufferQueue.begin());
+        return OK;
+    }
+    return mErrorCode;
+}
+
+void MediaCodecSource::signalBufferReturned(MediaBuffer *buffer) {
+    buffer->setObserver(0);
+    buffer->release();
+}
+
+MediaCodecSource::MediaCodecSource(
+        const sp<ALooper> &looper,
+        const sp<AMessage> &outputFormat,
+        const sp<MediaSource> &source,
+        uint32_t flags)
+    : mLooper(looper),
+      mOutputFormat(outputFormat),
+      mMeta(new MetaData),
+      mFlags(flags),
+      mIsVideo(false),
+      mStarted(false),
+      mStopping(false),
+      mDoMoreWorkPending(false),
+      mPullerReachedEOS(false),
+      mFirstSampleTimeUs(-1ll),
+      mEncodedReachedEOS(false),
+      mErrorCode(OK) {
+    CHECK(mLooper != NULL);
+
+    AString mime;
+    CHECK(mOutputFormat->findString("mime", &mime));
+
+    if (!strncasecmp("video/", mime.c_str(), 6)) {
+        mIsVideo = true;
+    }
+
+    if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+        mPuller = new Puller(source);
+    }
+}
+
+MediaCodecSource::~MediaCodecSource() {
+    releaseEncoder();
+
+    mCodecLooper->stop();
+    mLooper->unregisterHandler(mReflector->id());
+}
+
+status_t MediaCodecSource::init() {
+    status_t err = initEncoder();
+
+    if (err != OK) {
+        releaseEncoder();
+    }
+
+    return err;
+}
+
+status_t MediaCodecSource::initEncoder() {
+    mReflector = new AHandlerReflector<MediaCodecSource>(this);
+    mLooper->registerHandler(mReflector);
+
+    mCodecLooper = new ALooper;
+    mCodecLooper->setName("codec_looper");
+    mCodecLooper->start();
+
+    if (mFlags & FLAG_USE_METADATA_INPUT) {
+        mOutputFormat->setInt32("store-metadata-in-buffers", 1);
+    }
+
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        mOutputFormat->setInt32("create-input-buffers-suspended", 1);
+    }
+
+    AString outputMIME;
+    CHECK(mOutputFormat->findString("mime", &outputMIME));
+
+    mEncoder = MediaCodec::CreateByType(
+            mCodecLooper, outputMIME.c_str(), true /* encoder */);
+
+    if (mEncoder == NULL) {
+        return NO_INIT;
+    }
+
+    ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
+
+    status_t err = mEncoder->configure(
+                mOutputFormat,
+                NULL /* nativeWindow */,
+                NULL /* crypto */,
+                MediaCodec::CONFIGURE_FLAG_ENCODE);
+
+    if (err != OK) {
+        return err;
+    }
+
+    mEncoder->getOutputFormat(&mOutputFormat);
+    convertMessageToMetaData(mOutputFormat, mMeta);
+
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        CHECK(mIsVideo);
+
+        err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    err = mEncoder->start();
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = mEncoder->getInputBuffers(&mEncoderInputBuffers);
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+
+    if (err != OK) {
+        return err;
+    }
+
+    mEncodedReachedEOS = false;
+    mErrorCode = OK;
+
+    return OK;
+}
+
+void MediaCodecSource::releaseEncoder() {
+    if (mEncoder == NULL) {
+        return;
+    }
+
+    mEncoder->release();
+    mEncoder.clear();
+
+    while (!mInputBufferQueue.empty()) {
+        MediaBuffer *mbuf = *mInputBufferQueue.begin();
+        mInputBufferQueue.erase(mInputBufferQueue.begin());
+        if (mbuf != NULL) {
+            mbuf->release();
+        }
+    }
+
+    for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) {
+        sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i);
+        ReleaseMediaBufferReference(accessUnit);
+    }
+
+    mEncoderInputBuffers.clear();
+    mEncoderOutputBuffers.clear();
+}
+
+bool MediaCodecSource::reachedEOS() {
+    return mEncodedReachedEOS && ((mPuller == NULL) || mPullerReachedEOS);
+}
+
+status_t MediaCodecSource::postSynchronouslyAndReturnError(
+        const sp<AMessage> &msg) {
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!response->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
+void MediaCodecSource::signalEOS(status_t err) {
+    if (!mEncodedReachedEOS) {
+        ALOGI("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
+        {
+            Mutex::Autolock autoLock(mOutputBufferLock);
+            // release all unread media buffers
+            for (List<MediaBuffer*>::iterator it = mOutputBufferQueue.begin();
+                    it != mOutputBufferQueue.end(); it++) {
+                (*it)->release();
+            }
+            mOutputBufferQueue.clear();
+            mEncodedReachedEOS = true;
+            mErrorCode = err;
+            mOutputBufferCond.signal();
+        }
+
+        releaseEncoder();
+    }
+    if (mStopping && reachedEOS()) {
+        ALOGI("MediaCodecSource (%s) fully stopped",
+                mIsVideo ? "video" : "audio");
+        // posting reply to everyone that's waiting
+        List<uint32_t>::iterator it;
+        for (it = mStopReplyIDQueue.begin();
+                it != mStopReplyIDQueue.end(); it++) {
+            (new AMessage)->postReply(*it);
+        }
+        mStopReplyIDQueue.clear();
+        mStopping = false;
+    }
+}
+
+void MediaCodecSource::suspend() {
+    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+    if (mEncoder != NULL) {
+        sp<AMessage> params = new AMessage;
+        params->setInt32("drop-input-frames", true);
+        mEncoder->setParameters(params);
+    }
+}
+
+void MediaCodecSource::resume(int64_t skipFramesBeforeUs) {
+    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+    if (mEncoder != NULL) {
+        sp<AMessage> params = new AMessage;
+        params->setInt32("drop-input-frames", false);
+        if (skipFramesBeforeUs > 0) {
+            params->setInt64("skip-frames-before", skipFramesBeforeUs);
+        }
+        mEncoder->setParameters(params);
+    }
+}
+
+void MediaCodecSource::scheduleDoMoreWork() {
+    if (mDoMoreWorkPending) {
+        return;
+    }
+
+    mDoMoreWorkPending = true;
+
+    if (mEncoderActivityNotify == NULL) {
+        mEncoderActivityNotify = new AMessage(
+                kWhatEncoderActivity, mReflector->id());
+    }
+    mEncoder->requestActivityNotification(mEncoderActivityNotify);
+}
+
+status_t MediaCodecSource::feedEncoderInputBuffers() {
+    while (!mInputBufferQueue.empty()
+            && !mAvailEncoderInputIndices.empty()) {
+        MediaBuffer* mbuf = *mInputBufferQueue.begin();
+        mInputBufferQueue.erase(mInputBufferQueue.begin());
+
+        size_t bufferIndex = *mAvailEncoderInputIndices.begin();
+        mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
+
+        int64_t timeUs = 0ll;
+        uint32_t flags = 0;
+        size_t size = 0;
+
+        if (mbuf != NULL) {
+            CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+
+            // push decoding time for video, or drift time for audio
+            if (mIsVideo) {
+                mDecodingTimeQueue.push_back(timeUs);
+            } else {
+#if DEBUG_DRIFT_TIME
+                if (mFirstSampleTimeUs < 0ll) {
+                    mFirstSampleTimeUs = timeUs;
+                }
+
+                int64_t driftTimeUs = 0;
+                if (mbuf->meta_data()->findInt64(kKeyDriftTime, &driftTimeUs)
+                        && driftTimeUs) {
+                    driftTimeUs = timeUs - mFirstSampleTimeUs - driftTimeUs;
+                }
+                mDriftTimeQueue.push_back(driftTimeUs);
+#endif // DEBUG_DRIFT_TIME
+            }
+
+            size = mbuf->size();
+
+            memcpy(mEncoderInputBuffers.itemAt(bufferIndex)->data(),
+                   mbuf->data(), size);
+
+            if (mIsVideo) {
+                // video encoder will release MediaBuffer when done
+                // with underlying data.
+                mEncoderInputBuffers.itemAt(bufferIndex)->meta()
+                        ->setPointer("mediaBuffer", mbuf);
+            } else {
+                mbuf->release();
+            }
+        } else {
+            flags = MediaCodec::BUFFER_FLAG_EOS;
+        }
+
+        status_t err = mEncoder->queueInputBuffer(
+                bufferIndex, 0, size, timeUs, flags);
+
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    return OK;
+}
+
+status_t MediaCodecSource::doMoreWork() {
+    status_t err;
+
+    if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+        for (;;) {
+            size_t bufferIndex;
+            err = mEncoder->dequeueInputBuffer(&bufferIndex);
+
+            if (err != OK) {
+                break;
+            }
+
+            mAvailEncoderInputIndices.push_back(bufferIndex);
+        }
+
+        feedEncoderInputBuffers();
+    }
+
+    for (;;) {
+        size_t bufferIndex;
+        size_t offset;
+        size_t size;
+        int64_t timeUs;
+        uint32_t flags;
+        native_handle_t* handle = NULL;
+        err = mEncoder->dequeueOutputBuffer(
+                &bufferIndex, &offset, &size, &timeUs, &flags);
+
+        if (err != OK) {
+            if (err == INFO_FORMAT_CHANGED) {
+                continue;
+            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+                mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+                continue;
+            }
+
+            if (err == -EAGAIN) {
+                err = OK;
+            }
+            break;
+        }
+        if (!(flags & MediaCodec::BUFFER_FLAG_EOS)) {
+            sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
+
+            MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
+            memcpy(mbuf->data(), outbuf->data(), outbuf->size());
+
+            if (!(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
+                if (mIsVideo) {
+                    int64_t decodingTimeUs;
+                    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+                        // GraphicBufferSource is supposed to discard samples
+                        // queued before start, and offset timeUs by start time
+                        CHECK_GE(timeUs, 0ll);
+                        // TODO:
+                        // Decoding time for surface source is unavailable,
+                        // use presentation time for now. May need to move
+                        // this logic into MediaCodec.
+                        decodingTimeUs = timeUs;
+                    } else {
+                        CHECK(!mDecodingTimeQueue.empty());
+                        decodingTimeUs = *(mDecodingTimeQueue.begin());
+                        mDecodingTimeQueue.erase(mDecodingTimeQueue.begin());
+                    }
+                    mbuf->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
+
+                    ALOGV("[video] time %lld us (%.2f secs), dts/pts diff %lld",
+                            timeUs, timeUs / 1E6, decodingTimeUs - timeUs);
+                } else {
+                    int64_t driftTimeUs = 0;
+#if DEBUG_DRIFT_TIME
+                    CHECK(!mDriftTimeQueue.empty());
+                    driftTimeUs = *(mDriftTimeQueue.begin());
+                    mDriftTimeQueue.erase(mDriftTimeQueue.begin());
+                    mbuf->meta_data()->setInt64(kKeyDriftTime, driftTimeUs);
+#endif // DEBUG_DRIFT_TIME
+                    ALOGV("[audio] time %lld us (%.2f secs), drift %lld",
+                            timeUs, timeUs / 1E6, driftTimeUs);
+                }
+                mbuf->meta_data()->setInt64(kKeyTime, timeUs);
+            } else {
+                mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
+            }
+            if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
+                mbuf->meta_data()->setInt32(kKeyIsSyncFrame, true);
+            }
+            mbuf->setObserver(this);
+            mbuf->add_ref();
+
+            {
+                Mutex::Autolock autoLock(mOutputBufferLock);
+                mOutputBufferQueue.push_back(mbuf);
+                mOutputBufferCond.signal();
+            }
+        }
+
+        mEncoder->releaseOutputBuffer(bufferIndex);
+
+        if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+            err = ERROR_END_OF_STREAM;
+            break;
+        }
+    }
+
+    return err;
+}
+
+status_t MediaCodecSource::onStart(MetaData *params) {
+    if (mStopping) {
+        ALOGE("Failed to start while we're stopping");
+        return INVALID_OPERATION;
+    }
+
+    if (mStarted) {
+        ALOGI("MediaCodecSource (%s) resuming", mIsVideo ? "video" : "audio");
+        if (mFlags & FLAG_USE_SURFACE_INPUT) {
+            resume();
+        } else {
+            CHECK(mPuller != NULL);
+            mPuller->resume();
+        }
+        return OK;
+    }
+
+    ALOGI("MediaCodecSource (%s) starting", mIsVideo ? "video" : "audio");
+
+    status_t err = OK;
+
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        int64_t startTimeUs;
+        if (!params || !params->findInt64(kKeyTime, &startTimeUs)) {
+            startTimeUs = -1ll;
+        }
+        resume(startTimeUs);
+        scheduleDoMoreWork();
+    } else {
+        CHECK(mPuller != NULL);
+        sp<AMessage> notify = new AMessage(
+                kWhatPullerNotify, mReflector->id());
+        err = mPuller->start(params, notify);
+        if (err != OK) {
+            mPullerReachedEOS = true;
+            return err;
+        }
+    }
+
+    ALOGI("MediaCodecSource (%s) started", mIsVideo ? "video" : "audio");
+
+    mStarted = true;
+    return OK;
+}
+
+void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+    case kWhatPullerNotify:
+    {
+        MediaBuffer *mbuf;
+        CHECK(msg->findPointer("accessUnit", (void**)&mbuf));
+
+        if (mbuf == NULL) {
+            ALOGI("puller (%s) reached EOS",
+                    mIsVideo ? "video" : "audio");
+            mPullerReachedEOS = true;
+        }
+
+        if (mEncoder == NULL) {
+            ALOGV("got msg '%s' after encoder shutdown.",
+                  msg->debugString().c_str());
+
+            if (mbuf != NULL) {
+                mbuf->release();
+            } else {
+                signalEOS();
+            }
+            break;
+        }
+
+        mInputBufferQueue.push_back(mbuf);
+
+        feedEncoderInputBuffers();
+        scheduleDoMoreWork();
+
+        break;
+    }
+    case kWhatEncoderActivity:
+    {
+        mDoMoreWorkPending = false;
+
+        if (mEncoder == NULL) {
+            break;
+        }
+
+        status_t err = doMoreWork();
+
+        if (err == OK) {
+            scheduleDoMoreWork();
+        } else {
+            // reached EOS, or error
+            signalEOS(err);
+        }
+
+        break;
+    }
+    case kWhatStart:
+    {
+        uint32_t replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+
+        sp<RefBase> obj;
+        CHECK(msg->findObject("meta", &obj));
+        MetaData *params = static_cast<MetaData *>(obj.get());
+
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", onStart(params));
+        response->postReply(replyID);
+        break;
+    }
+    case kWhatStop:
+    {
+        ALOGI("MediaCodecSource (%s) stopping", mIsVideo ? "video" : "audio");
+
+        uint32_t replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+
+        if (reachedEOS()) {
+            // if we already reached EOS, reply and return now
+            ALOGI("MediaCodecSource (%s) already stopped",
+                    mIsVideo ? "video" : "audio");
+            (new AMessage)->postReply(replyID);
+            break;
+        }
+
+        mStopReplyIDQueue.push_back(replyID);
+        if (mStopping) {
+            // nothing to do if we're already stopping, reply will be posted
+            // to all when we're stopped.
+            break;
+        }
+
+        mStopping = true;
+
+        // if using surface, signal source EOS and wait for EOS to come back.
+        // otherwise, release encoder and post EOS if haven't done already
+        if (mFlags & FLAG_USE_SURFACE_INPUT) {
+            mEncoder->signalEndOfInputStream();
+        } else {
+            CHECK(mPuller != NULL);
+            mPuller->stopAsync();
+            signalEOS();
+        }
+        break;
+    }
+    case kWhatPause:
+    {
+        if (mFlags & FLAG_USE_SURFACE_INPUT) {
+            suspend();
+        } else {
+            CHECK(mPuller != NULL);
+            mPuller->pause();
+        }
+        break;
+    }
+    default:
+        TRESPASS();
+    }
+}
+
+} // namespace android
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 340cba7..c670bb4 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -36,6 +36,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm";
 const char *MEDIA_MIMETYPE_AUDIO_QCELP = "audio/qcelp";
 const char *MEDIA_MIMETYPE_AUDIO_VORBIS = "audio/vorbis";
+const char *MEDIA_MIMETYPE_AUDIO_OPUS = "audio/opus";
 const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW = "audio/g711-alaw";
 const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW = "audio/g711-mlaw";
 const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw";
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index d87e910..90335ee 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -16,6 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaMuxer"
+
+#include "webm/WebmWriter.h"
+
 #include <utils/Log.h>
 
 #include <media/stagefright/MediaMuxer.h>
@@ -36,19 +39,30 @@
 namespace android {
 
 MediaMuxer::MediaMuxer(const char *path, OutputFormat format)
-    : mState(UNINITIALIZED) {
+    : mFormat(format),
+      mState(UNINITIALIZED) {
     if (format == OUTPUT_FORMAT_MPEG_4) {
         mWriter = new MPEG4Writer(path);
+    } else if (format == OUTPUT_FORMAT_WEBM) {
+        mWriter = new WebmWriter(path);
+    }
+
+    if (mWriter != NULL) {
         mFileMeta = new MetaData;
         mState = INITIALIZED;
     }
-
 }
 
 MediaMuxer::MediaMuxer(int fd, OutputFormat format)
-    : mState(UNINITIALIZED) {
+    : mFormat(format),
+      mState(UNINITIALIZED) {
     if (format == OUTPUT_FORMAT_MPEG_4) {
         mWriter = new MPEG4Writer(fd);
+    } else if (format == OUTPUT_FORMAT_WEBM) {
+        mWriter = new WebmWriter(fd);
+    }
+
+    if (mWriter != NULL) {
         mFileMeta = new MetaData;
         mState = INITIALIZED;
     }
@@ -109,8 +123,13 @@
         ALOGE("setLocation() must be called before start().");
         return INVALID_OPERATION;
     }
+    if (mFormat != OUTPUT_FORMAT_MPEG_4) {
+        ALOGE("setLocation() is only supported for .mp4 output.");
+        return INVALID_OPERATION;
+    }
+
     ALOGV("Setting location: latitude = %d, longitude = %d", latitude, longitude);
-    return mWriter->setGeoData(latitude, longitude);
+    return static_cast<MPEG4Writer*>(mWriter.get())->setGeoData(latitude, longitude);
 }
 
 status_t MediaMuxer::start() {
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 06e2d43..61cf0ad 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -213,7 +213,14 @@
 
     mLooper->setName("NuCachedSource2");
     mLooper->registerHandler(mReflector);
-    mLooper->start();
+
+    // Since it may not be obvious why our looper thread needs to be
+    // able to call into java since it doesn't appear to do so at all...
+    // IMediaHTTPConnection may be (and most likely is) implemented in JAVA
+    // and a local JAVA IBinder will call directly into JNI methods.
+    // So whenever we call DataSource::readAt it may end up in a call to
+    // IMediaHTTPConnection::readAt and therefore call back into JAVA.
+    mLooper->start(false /* runOnCallingThread */, true /* canCallJava */);
 
     Mutex::Autolock autoLock(mLock);
     (new AMessage(kWhatFetchMore, mReflector->id()))->post();
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 7bc7da2..64f56e9 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -58,7 +58,9 @@
 }
 
 status_t NuMediaExtractor::setDataSource(
-        const char *path, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *path,
+        const KeyedVector<String8, String8> *headers) {
     Mutex::Autolock autoLock(mLock);
 
     if (mImpl != NULL) {
@@ -66,7 +68,7 @@
     }
 
     sp<DataSource> dataSource =
-        DataSource::CreateFromURI(path, headers);
+        DataSource::CreateFromURI(httpService, path, headers);
 
     if (dataSource == NULL) {
         return -ENOENT;
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 96c5a32..a879656 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -96,6 +96,7 @@
 
 #define CODEC_LOGI(x, ...) ALOGI("[%s] "x, mComponentName, ##__VA_ARGS__)
 #define CODEC_LOGV(x, ...) ALOGV("[%s] "x, mComponentName, ##__VA_ARGS__)
+#define CODEC_LOGW(x, ...) ALOGW("[%s] "x, mComponentName, ##__VA_ARGS__)
 #define CODEC_LOGE(x, ...) ALOGE("[%s] "x, mComponentName, ##__VA_ARGS__)
 
 struct OMXCodecObserver : public BnOMXObserver {
@@ -491,6 +492,13 @@
 
             CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
             addCodecSpecificData(data, size);
+        } else if (meta->findData(kKeyOpusHeader, &type, &data, &size)) {
+            addCodecSpecificData(data, size);
+
+            CHECK(meta->findData(kKeyOpusCodecDelay, &type, &data, &size));
+            addCodecSpecificData(data, size);
+            CHECK(meta->findData(kKeyOpusSeekPreRoll, &type, &data, &size));
+            addCodecSpecificData(data, size);
         }
     }
 
@@ -1389,6 +1397,8 @@
             "audio_decoder.aac", "audio_encoder.aac" },
         { MEDIA_MIMETYPE_AUDIO_VORBIS,
             "audio_decoder.vorbis", "audio_encoder.vorbis" },
+        { MEDIA_MIMETYPE_AUDIO_OPUS,
+            "audio_decoder.opus", "audio_encoder.opus" },
         { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
             "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
         { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
@@ -1796,21 +1806,42 @@
                 strerror(-err), -err);
         return err;
     }
+    // FIXME: assume that surface is controlled by app (native window
+    // returns the number for the case when surface is not controlled by app)
+    // FIXME2: This means that minUndequeuedBufs can be 1 larger than reported
+    // For now, try to allocate 1 more buffer, but don't fail if unsuccessful
 
-    // XXX: Is this the right logic to use?  It's not clear to me what the OMX
-    // buffer counts refer to - how do they account for the renderer holding on
-    // to buffers?
-    if (def.nBufferCountActual < def.nBufferCountMin + minUndequeuedBufs) {
-        OMX_U32 newBufferCount = def.nBufferCountMin + minUndequeuedBufs;
+    // Use conservative allocation while also trying to reduce starvation
+    //
+    // 1. allocate at least nBufferCountMin + minUndequeuedBuffers - that is the
+    //    minimum needed for the consumer to be able to work
+    // 2. try to allocate two (2) additional buffers to reduce starvation from
+    //    the consumer
+    //    plus an extra buffer to account for incorrect minUndequeuedBufs
+    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
+            def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
+
+    for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
+        OMX_U32 newBufferCount =
+            def.nBufferCountMin + minUndequeuedBufs + extraBuffers;
         def.nBufferCountActual = newBufferCount;
         err = mOMX->setParameter(
                 mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-        if (err != OK) {
-            CODEC_LOGE("setting nBufferCountActual to %lu failed: %d",
-                    newBufferCount, err);
+
+        if (err == OK) {
+            minUndequeuedBufs += extraBuffers;
+            break;
+        }
+
+        CODEC_LOGW("setting nBufferCountActual to %lu failed: %d",
+                newBufferCount, err);
+        /* exit condition */
+        if (extraBuffers == 0) {
             return err;
         }
     }
+    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
+            def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
 
     err = native_window_set_buffer_count(
             mNativeWindow.get(), def.nBufferCountActual);
@@ -4127,6 +4158,7 @@
         "OMX_AUDIO_CodingMP3",
         "OMX_AUDIO_CodingSBC",
         "OMX_AUDIO_CodingVORBIS",
+        "OMX_AUDIO_CodingOPUS",
         "OMX_AUDIO_CodingWMA",
         "OMX_AUDIO_CodingRA",
         "OMX_AUDIO_CodingMIDI",
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 773854f..e2e6d79 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -25,7 +25,7 @@
 namespace android {
 
 SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
-    mFrontPadding = skip;
+    mFrontPadding = mSkip = skip;
     mBackPadding = cut;
     mWriteHead = 0;
     mReadHead = 0;
@@ -94,6 +94,7 @@
 
 void SkipCutBuffer::clear() {
     mWriteHead = mReadHead = 0;
+    mFrontPadding = mSkip;
 }
 
 void SkipCutBuffer::write(const char *src, size_t num) {
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index 2b51a29..fe20835 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -24,6 +24,7 @@
 
 #include <media/stagefright/StagefrightMediaScanner.h>
 
+#include <media/IMediaHTTPService.h>
 #include <media/mediametadataretriever.h>
 #include <private/media/VideoFrame.h>
 
@@ -147,7 +148,7 @@
     status_t status;
     if (fd < 0) {
         // couldn't open it locally, maybe the media server can?
-        status = mRetriever->setDataSource(path);
+        status = mRetriever->setDataSource(NULL /* httpService */, path);
     } else {
         status = mRetriever->setDataSource(fd, 0, 0x7ffffffffffffffL);
         close(fd);
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index fcd9a85..9475d05 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -21,6 +21,7 @@
 
 #include "include/StagefrightMetadataRetriever.h"
 
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/DataSource.h>
@@ -51,7 +52,9 @@
 }
 
 status_t StagefrightMetadataRetriever::setDataSource(
-        const char *uri, const KeyedVector<String8, String8> *headers) {
+        const sp<IMediaHTTPService> &httpService,
+        const char *uri,
+        const KeyedVector<String8, String8> *headers) {
     ALOGV("setDataSource(%s)", uri);
 
     mParsedMetaData = false;
@@ -59,7 +62,7 @@
     delete mAlbumArt;
     mAlbumArt = NULL;
 
-    mSource = DataSource::CreateFromURI(uri, headers);
+    mSource = DataSource::CreateFromURI(httpService, uri, headers);
 
     if (mSource == NULL) {
         ALOGE("Unable to create data source for '%s'.", uri);
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 686d03a..62aea36 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -54,9 +54,9 @@
         ALOGE("Invalid dimensions %dx%d", bufferWidth, bufferHeight);
     }
 
-    mBufferQueue = new BufferQueue();
-    mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
+    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
             GRALLOC_USAGE_HW_TEXTURE);
 
     sp<ISurfaceComposer> composer(ComposerService::getComposerService());
@@ -68,7 +68,7 @@
     wp<ConsumerListener> listener = static_cast<ConsumerListener*>(this);
     sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
 
-    status_t err = mBufferQueue->consumerConnect(proxy, false);
+    status_t err = mConsumer->consumerConnect(proxy, false);
     if (err != NO_ERROR) {
         ALOGE("SurfaceMediaSource: error connecting to BufferQueue: %s (%d)",
                 strerror(-err), err);
@@ -108,7 +108,7 @@
     Mutex::Autolock lock(mMutex);
 
     result.append(buffer);
-    mBufferQueue->dump(result, "");
+    mConsumer->dump(result, "");
 }
 
 status_t SurfaceMediaSource::setFrameRate(int32_t fps)
@@ -166,7 +166,7 @@
     CHECK_GT(mMaxAcquiredBufferCount, 1);
 
     status_t err =
-        mBufferQueue->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
+        mConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
 
     if (err != OK) {
         return err;
@@ -205,6 +205,9 @@
         return OK;
     }
 
+    mStarted = false;
+    mFrameAvailableCondition.signal();
+
     while (mNumPendingBuffers > 0) {
         ALOGI("Still waiting for %d buffers to be returned.",
                 mNumPendingBuffers);
@@ -218,11 +221,9 @@
         mMediaBuffersAvailableCondition.wait(mMutex);
     }
 
-    mStarted = false;
-    mFrameAvailableCondition.signal();
     mMediaBuffersAvailableCondition.signal();
 
-    return mBufferQueue->consumerDisconnect();
+    return mConsumer->consumerDisconnect();
 }
 
 sp<MetaData> SurfaceMediaSource::getFormat()
@@ -292,7 +293,7 @@
     // wait here till the frames come in from the client side
     while (mStarted) {
 
-        status_t err = mBufferQueue->acquireBuffer(&item, 0);
+        status_t err = mConsumer->acquireBuffer(&item, 0);
         if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
             // wait for a buffer to be queued
             mFrameAvailableCondition.wait(mMutex);
@@ -315,7 +316,7 @@
                 if (mStartTimeNs > 0) {
                     if (item.mTimestamp < mStartTimeNs) {
                         // This frame predates start of record, discard
-                        mBufferQueue->releaseBuffer(
+                        mConsumer->releaseBuffer(
                                 item.mBuf, item.mFrameNumber, EGL_NO_DISPLAY,
                                 EGL_NO_SYNC_KHR, Fence::NO_FENCE);
                         continue;
@@ -415,7 +416,7 @@
             ALOGV("Slot %d returned, matches handle = %p", id,
                     mSlots[id].mGraphicBuffer->handle);
 
-            mBufferQueue->releaseBuffer(id, mSlots[id].mFrameNumber,
+            mConsumer->releaseBuffer(id, mSlots[id].mFrameNumber,
                                         EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
                     Fence::NO_FENCE);
 
@@ -476,4 +477,8 @@
     }
 }
 
+void SurfaceMediaSource::onSidebandStreamChanged() {
+    ALOG_ASSERT(false, "SurfaceMediaSource can't consume sideband streams");
+}
+
 } // end of namespace android
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 216a329..047fac7 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "Utils"
 #include <utils/Log.h>
+#include <ctype.h>
 
 #include "include/ESDS.h"
 
@@ -251,6 +252,13 @@
         buffer->meta()->setInt32("csd", true);
         buffer->meta()->setInt64("timeUs", 0);
         msg->setBuffer("csd-1", buffer);
+    } else if (meta->findData(kKeyOpusHeader, &type, &data, &size)) {
+        sp<ABuffer> buffer = new ABuffer(size);
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-0", buffer);
     }
 
     *format = msg;
@@ -452,6 +460,11 @@
         }
     }
 
+    int32_t timeScale;
+    if (msg->findInt32("time-scale", &timeScale)) {
+        meta->setInt32(kKeyTimeScale, timeScale);
+    }
+
     // XXX TODO add whatever other keys there are
 
 #if 0
@@ -523,6 +536,7 @@
     { MEDIA_MIMETYPE_AUDIO_AMR_WB,      AUDIO_FORMAT_AMR_WB },
     { MEDIA_MIMETYPE_AUDIO_AAC,         AUDIO_FORMAT_AAC },
     { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
+    { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
     { 0, AUDIO_FORMAT_INVALID }
 };
 
@@ -615,5 +629,40 @@
     return AudioSystem::isOffloadSupported(info);
 }
 
+AString uriDebugString(const AString &uri, bool incognito) {
+    if (incognito) {
+        return AString("<URI suppressed>");
+    }
+
+    char prop[PROPERTY_VALUE_MAX];
+    if (property_get("media.stagefright.log-uri", prop, "false") &&
+        (!strcmp(prop, "1") || !strcmp(prop, "true"))) {
+        return uri;
+    }
+
+    // find scheme
+    AString scheme;
+    const char *chars = uri.c_str();
+    for (size_t i = 0; i < uri.size(); i++) {
+        const char c = chars[i];
+        if (!isascii(c)) {
+            break;
+        } else if (isalpha(c)) {
+            continue;
+        } else if (i == 0) {
+            // first character must be a letter
+            break;
+        } else if (isdigit(c) || c == '+' || c == '.' || c =='-') {
+            continue;
+        } else if (c != ':') {
+            break;
+        }
+        scheme = AString(uri, 0, i);
+        scheme.append("://<suppressed>");
+        return scheme;
+    }
+    return AString("<no-scheme URI suppressed>");
+}
+
 }  // namespace android
 
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index c6ac0da..38a1f6b 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -40,6 +40,25 @@
     return x + (1u << numZeroes) - 1;
 }
 
+signed parseSE(ABitReader *br) {
+    unsigned codeNum = parseUE(br);
+
+    return (codeNum & 1) ? (codeNum + 1) / 2 : -(codeNum / 2);
+}
+
+static void skipScalingList(ABitReader *br, size_t sizeOfScalingList) {
+    size_t lastScale = 8;
+    size_t nextScale = 8;
+    for (size_t j = 0; j < sizeOfScalingList; ++j) {
+        if (nextScale != 0) {
+            signed delta_scale = parseSE(br);
+            nextScale = (lastScale + delta_scale + 256) % 256;
+        }
+
+        lastScale = (nextScale == 0) ? lastScale : nextScale;
+    }
+}
+
 // Determine video dimensions from the sequence parameterset.
 void FindAVCDimensions(
         const sp<ABuffer> &seqParamSet,
@@ -63,7 +82,24 @@
         parseUE(&br);  // bit_depth_luma_minus8
         parseUE(&br);  // bit_depth_chroma_minus8
         br.skipBits(1);  // qpprime_y_zero_transform_bypass_flag
-        CHECK_EQ(br.getBits(1), 0u);  // seq_scaling_matrix_present_flag
+
+        if (br.getBits(1)) {  // seq_scaling_matrix_present_flag
+            for (size_t i = 0; i < 8; ++i) {
+                if (br.getBits(1)) {  // seq_scaling_list_present_flag[i]
+
+                    // WARNING: the code below has not ever been exercised...
+                    // need a real-world example.
+
+                    if (i < 6) {
+                        // ScalingList4x4[i],16,...
+                        skipScalingList(&br, 16);
+                    } else {
+                        // ScalingList8x8[i-6],64,...
+                        skipScalingList(&br, 64);
+                    }
+                }
+            }
+        }
     }
 
     parseUE(&br);  // log2_max_frame_num_minus4
diff --git a/media/libstagefright/chromium_http/Android.mk b/media/libstagefright/chromium_http/Android.mk
deleted file mode 100644
index 109e3fe..0000000
--- a/media/libstagefright/chromium_http/Android.mk
+++ /dev/null
@@ -1,39 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-ifneq ($(TARGET_BUILD_PDK), true)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=       \
-        DataUriSource.cpp \
-        ChromiumHTTPDataSource.cpp \
-        support.cpp \
-        chromium_http_stub.cpp
-
-LOCAL_C_INCLUDES:= \
-        $(TOP)/frameworks/av/media/libstagefright \
-        $(TOP)/frameworks/native/include/media/openmax \
-        external/chromium \
-        external/chromium/android
-
-LOCAL_CFLAGS += -Wno-multichar
-
-LOCAL_SHARED_LIBRARIES += \
-        libbinder \
-        libstlport \
-        libchromium_net \
-        libutils \
-        libbinder \
-        libcutils \
-        liblog \
-        libstagefright_foundation \
-        libstagefright \
-        libdrmframework
-
-include external/stlport/libstlport.mk
-
-LOCAL_MODULE:= libstagefright_chromium_http
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-endif
diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
deleted file mode 100644
index 7e5c280..0000000
--- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ChromiumHTTPDataSource"
-#include <media/stagefright/foundation/ADebug.h>
-
-#include "include/ChromiumHTTPDataSource.h"
-
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/MediaErrors.h>
-
-#include "support.h"
-
-#include <cutils/properties.h> // for property_get
-
-namespace android {
-
-ChromiumHTTPDataSource::ChromiumHTTPDataSource(uint32_t flags)
-    : mFlags(flags),
-      mState(DISCONNECTED),
-      mDelegate(new SfDelegate),
-      mCurrentOffset(0),
-      mIOResult(OK),
-      mContentSize(-1),
-      mDecryptHandle(NULL),
-      mDrmManagerClient(NULL) {
-    mDelegate->setOwner(this);
-}
-
-ChromiumHTTPDataSource::~ChromiumHTTPDataSource() {
-    disconnect();
-
-    delete mDelegate;
-    mDelegate = NULL;
-
-    clearDRMState_l();
-
-    if (mDrmManagerClient != NULL) {
-        delete mDrmManagerClient;
-        mDrmManagerClient = NULL;
-    }
-}
-
-status_t ChromiumHTTPDataSource::connect(
-        const char *uri,
-        const KeyedVector<String8, String8> *headers,
-        off64_t offset) {
-    Mutex::Autolock autoLock(mLock);
-
-    uid_t uid;
-    if (getUID(&uid)) {
-        mDelegate->setUID(uid);
-    }
-
-#if defined(LOG_NDEBUG) && !LOG_NDEBUG
-    LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG, "connect on behalf of uid %d", uid);
-#endif
-
-    return connect_l(uri, headers, offset);
-}
-
-status_t ChromiumHTTPDataSource::connect_l(
-        const char *uri,
-        const KeyedVector<String8, String8> *headers,
-        off64_t offset) {
-    if (mState != DISCONNECTED) {
-        disconnect_l();
-    }
-
-#if defined(LOG_NDEBUG) && !LOG_NDEBUG
-    LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG,
-                "connect to <URL suppressed> @%lld", offset);
-#endif
-
-    mURI = uri;
-    mContentType = String8("application/octet-stream");
-
-    if (headers != NULL) {
-        mHeaders = *headers;
-    } else {
-        mHeaders.clear();
-    }
-
-    mState = CONNECTING;
-    mContentSize = -1;
-    mCurrentOffset = offset;
-
-    mDelegate->initiateConnection(mURI.c_str(), &mHeaders, offset);
-
-    while (mState == CONNECTING || mState == DISCONNECTING) {
-        mCondition.wait(mLock);
-    }
-
-    return mState == CONNECTED ? OK : mIOResult;
-}
-
-void ChromiumHTTPDataSource::onRedirect(const char *url) {
-    Mutex::Autolock autoLock(mLock);
-    mURI = url;
-}
-
-void ChromiumHTTPDataSource::onConnectionEstablished(
-        int64_t contentSize, const char *contentType) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mState != CONNECTING) {
-        // We may have initiated disconnection.
-        CHECK_EQ(mState, DISCONNECTING);
-        return;
-    }
-
-    mState = CONNECTED;
-    mContentSize = (contentSize < 0) ? -1 : contentSize + mCurrentOffset;
-    mContentType = String8(contentType);
-    mCondition.broadcast();
-}
-
-void ChromiumHTTPDataSource::onConnectionFailed(status_t err) {
-    Mutex::Autolock autoLock(mLock);
-    mState = DISCONNECTED;
-    mCondition.broadcast();
-
-    // mURI.clear();
-
-    mIOResult = err;
-}
-
-void ChromiumHTTPDataSource::disconnect() {
-    Mutex::Autolock autoLock(mLock);
-    disconnect_l();
-}
-
-void ChromiumHTTPDataSource::disconnect_l() {
-    if (mState == DISCONNECTED) {
-        return;
-    }
-
-    mState = DISCONNECTING;
-    mIOResult = -EINTR;
-
-    mDelegate->initiateDisconnect();
-
-    while (mState == DISCONNECTING) {
-        mCondition.wait(mLock);
-    }
-
-    CHECK_EQ((int)mState, (int)DISCONNECTED);
-}
-
-status_t ChromiumHTTPDataSource::initCheck() const {
-    Mutex::Autolock autoLock(mLock);
-
-    return mState == CONNECTED ? OK : NO_INIT;
-}
-
-ssize_t ChromiumHTTPDataSource::readAt(off64_t offset, void *data, size_t size) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mState != CONNECTED) {
-        return INVALID_OPERATION;
-    }
-
-#if 0
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.disable-net", value, 0)
-            && (!strcasecmp(value, "true") || !strcmp(value, "1"))) {
-        LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Simulating that the network is down.");
-        disconnect_l();
-        return ERROR_IO;
-    }
-#endif
-
-    if (offset != mCurrentOffset) {
-        AString tmp = mURI;
-        KeyedVector<String8, String8> tmpHeaders = mHeaders;
-
-        disconnect_l();
-
-        status_t err = connect_l(tmp.c_str(), &tmpHeaders, offset);
-
-        if (err != OK) {
-            return err;
-        }
-    }
-
-    mState = READING;
-
-    int64_t startTimeUs = ALooper::GetNowUs();
-
-    mDelegate->initiateRead(data, size);
-
-    while (mState == READING) {
-        mCondition.wait(mLock);
-    }
-
-    if (mIOResult < OK) {
-        return mIOResult;
-    }
-
-    if (mState == CONNECTED) {
-        int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
-
-        // The read operation was successful, mIOResult contains
-        // the number of bytes read.
-        addBandwidthMeasurement(mIOResult, delayUs);
-
-        mCurrentOffset += mIOResult;
-        return mIOResult;
-    }
-
-    return ERROR_IO;
-}
-
-void ChromiumHTTPDataSource::onReadCompleted(ssize_t size) {
-    Mutex::Autolock autoLock(mLock);
-
-    mIOResult = size;
-
-    if (mState == READING) {
-        mState = CONNECTED;
-        mCondition.broadcast();
-    }
-}
-
-status_t ChromiumHTTPDataSource::getSize(off64_t *size) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mContentSize < 0) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    *size = mContentSize;
-
-    return OK;
-}
-
-uint32_t ChromiumHTTPDataSource::flags() {
-    return kWantsPrefetching | kIsHTTPBasedSource;
-}
-
-// static
-void ChromiumHTTPDataSource::InitiateRead(
-        ChromiumHTTPDataSource *me, void *data, size_t size) {
-    me->initiateRead(data, size);
-}
-
-void ChromiumHTTPDataSource::initiateRead(void *data, size_t size) {
-    mDelegate->initiateRead(data, size);
-}
-
-void ChromiumHTTPDataSource::onDisconnectComplete() {
-    Mutex::Autolock autoLock(mLock);
-    CHECK_EQ((int)mState, (int)DISCONNECTING);
-
-    mState = DISCONNECTED;
-    // mURI.clear();
-    mIOResult = -ENOTCONN;
-
-    mCondition.broadcast();
-}
-
-sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization(const char* mime) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mDrmManagerClient == NULL) {
-        mDrmManagerClient = new DrmManagerClient();
-    }
-
-    if (mDrmManagerClient == NULL) {
-        return NULL;
-    }
-
-    if (mDecryptHandle == NULL) {
-        /* Note if redirect occurs, mUri is the redirect uri instead of the
-         * original one
-         */
-        mDecryptHandle = mDrmManagerClient->openDecryptSession(
-                String8(mURI.c_str()), mime);
-    }
-
-    if (mDecryptHandle == NULL) {
-        delete mDrmManagerClient;
-        mDrmManagerClient = NULL;
-    }
-
-    return mDecryptHandle;
-}
-
-void ChromiumHTTPDataSource::getDrmInfo(
-        sp<DecryptHandle> &handle, DrmManagerClient **client) {
-    Mutex::Autolock autoLock(mLock);
-
-    handle = mDecryptHandle;
-    *client = mDrmManagerClient;
-}
-
-String8 ChromiumHTTPDataSource::getUri() {
-    Mutex::Autolock autoLock(mLock);
-
-    return String8(mURI.c_str());
-}
-
-String8 ChromiumHTTPDataSource::getMIMEType() const {
-    Mutex::Autolock autoLock(mLock);
-
-    return mContentType;
-}
-
-void ChromiumHTTPDataSource::clearDRMState_l() {
-    if (mDecryptHandle != NULL) {
-        // To release mDecryptHandle
-        CHECK(mDrmManagerClient);
-        mDrmManagerClient->closeDecryptSession(mDecryptHandle);
-        mDecryptHandle = NULL;
-    }
-}
-
-status_t ChromiumHTTPDataSource::reconnectAtOffset(off64_t offset) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mURI.empty()) {
-        return INVALID_OPERATION;
-    }
-
-    LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnecting...");
-    status_t err = connect_l(mURI.c_str(), &mHeaders, offset);
-    if (err != OK) {
-        LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnect failed w/ err 0x%08x", err);
-    }
-
-    return err;
-}
-
-// static
-status_t ChromiumHTTPDataSource::UpdateProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    return SfDelegate::UpdateProxyConfig(host, port, exclusionList);
-}
-
-}  // namespace android
-
diff --git a/media/libstagefright/chromium_http/DataUriSource.cpp b/media/libstagefright/chromium_http/DataUriSource.cpp
deleted file mode 100644
index ecf3fa1..0000000
--- a/media/libstagefright/chromium_http/DataUriSource.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <include/DataUriSource.h>
-
-#include <net/base/data_url.h>
-#include <googleurl/src/gurl.h>
-
-
-namespace android {
-
-DataUriSource::DataUriSource(const char *uri) :
-    mDataUri(uri),
-    mInited(NO_INIT) {
-
-    // Copy1: const char *uri -> String8 mDataUri.
-    std::string mimeTypeStr, unusedCharsetStr, dataStr;
-    // Copy2: String8 mDataUri -> std::string
-    const bool ret = net::DataURL::Parse(
-            GURL(std::string(mDataUri.string())),
-            &mimeTypeStr, &unusedCharsetStr, &dataStr);
-    // Copy3: std::string dataStr -> AString mData
-    mData.setTo(dataStr.data(), dataStr.length());
-    mInited = ret ? OK : UNKNOWN_ERROR;
-
-    // The chromium data url implementation defaults to using "text/plain"
-    // if no mime type is specified. We prefer to leave this unspecified
-    // instead, since the mime type is sniffed in most cases.
-    if (mimeTypeStr != "text/plain") {
-        mMimeType = mimeTypeStr.c_str();
-    }
-}
-
-ssize_t DataUriSource::readAt(off64_t offset, void *out, size_t size) {
-    if (mInited != OK) {
-        return mInited;
-    }
-
-    const off64_t length = mData.size();
-    if (offset >= length) {
-        return UNKNOWN_ERROR;
-    }
-
-    const char *dataBuf = mData.c_str();
-    const size_t bytesToCopy =
-            offset + size >= length ? (length - offset) : size;
-
-    if (bytesToCopy > 0) {
-        memcpy(out, dataBuf + offset, bytesToCopy);
-    }
-
-    return bytesToCopy;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/chromium_http/chromium_http_stub.cpp b/media/libstagefright/chromium_http/chromium_http_stub.cpp
deleted file mode 100644
index 289f6de..0000000
--- a/media/libstagefright/chromium_http/chromium_http_stub.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dlfcn.h>
-
-#include <include/chromium_http_stub.h>
-#include <include/ChromiumHTTPDataSource.h>
-#include <include/DataUriSource.h>
-
-namespace android {
-
-HTTPBase *createChromiumHTTPDataSource(uint32_t flags) {
-    return new ChromiumHTTPDataSource(flags);
-}
-
-status_t UpdateChromiumHTTPDataSourceProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    return ChromiumHTTPDataSource::UpdateProxyConfig(host, port, exclusionList);
-}
-
-DataSource *createDataUriSource(const char *uri) {
-    return new DataUriSource(uri);
-}
-
-}
diff --git a/media/libstagefright/chromium_http/support.cpp b/media/libstagefright/chromium_http/support.cpp
deleted file mode 100644
index 3de4877..0000000
--- a/media/libstagefright/chromium_http/support.cpp
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ChromiumHTTPDataSourceSupport"
-#include <utils/Log.h>
-
-#include <media/stagefright/foundation/AString.h>
-
-#include "support.h"
-
-#include "android/net/android_network_library_impl.h"
-#include "base/logging.h"
-#include "base/threading/thread.h"
-#include "net/base/cert_verifier.h"
-#include "net/base/cookie_monster.h"
-#include "net/base/host_resolver.h"
-#include "net/base/ssl_config_service.h"
-#include "net/http/http_auth_handler_factory.h"
-#include "net/http/http_cache.h"
-#include "net/proxy/proxy_config_service_android.h"
-
-#include "include/ChromiumHTTPDataSource.h"
-#include <arpa/inet.h>
-#include <binder/Parcel.h>
-#include <cutils/log.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <string>
-
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IServiceManager.h>
-
-namespace android {
-
-// must be kept in sync with interface defined in IAudioService.aidl
-class IAudioService : public IInterface
-{
-public:
-    DECLARE_META_INTERFACE(AudioService);
-
-    virtual int verifyX509CertChain(
-            const std::vector<std::string>& cert_chain,
-            const std::string& hostname,
-            const std::string& auth_type) = 0;
-};
-
-class BpAudioService : public BpInterface<IAudioService>
-{
-public:
-    BpAudioService(const sp<IBinder>& impl)
-        : BpInterface<IAudioService>(impl)
-    {
-    }
-
-    virtual int verifyX509CertChain(
-            const std::vector<std::string>& cert_chain,
-            const std::string& hostname,
-            const std::string& auth_type)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioService::getInterfaceDescriptor());
-
-        // The vector of std::string we get isn't really a vector of strings,
-        // but rather a vector of binary certificate data. If we try to pass
-        // it to Java language code as a string, it ends up mangled on the other
-        // side, so send them as bytes instead.
-        // Since we can't send an array of byte arrays, send a single array,
-        // which will be split out by the recipient.
-
-        int numcerts = cert_chain.size();
-        data.writeInt32(numcerts);
-        size_t total = 0;
-        for (int i = 0; i < numcerts; i++) {
-            total += cert_chain[i].size();
-        }
-        size_t bytesize = total + numcerts * 4;
-        uint8_t *bytes = (uint8_t*) malloc(bytesize);
-        if (!bytes) {
-            return 5; // SSL_INVALID
-        }
-        ALOGV("%d certs: %d -> %d", numcerts, total, bytesize);
-
-        int offset = 0;
-        for (int i = 0; i < numcerts; i++) {
-            int32_t certsize = cert_chain[i].size();
-            // store this in a known order, which just happens to match the default
-            // byte order of a java ByteBuffer
-            int32_t bigsize = htonl(certsize);
-            ALOGV("cert %d, size %d", i, certsize);
-            memcpy(bytes + offset, &bigsize, sizeof(bigsize));
-            offset += sizeof(bigsize);
-            memcpy(bytes + offset, cert_chain[i].data(), certsize);
-            offset += certsize;
-        }
-        data.writeByteArray(bytesize, bytes);
-        free(bytes);
-        data.writeString16(String16(hostname.c_str()));
-        data.writeString16(String16(auth_type.c_str()));
-
-        int32_t result;
-        if (remote()->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply) != NO_ERROR
-                || reply.readExceptionCode() < 0 || reply.readInt32(&result) != NO_ERROR) {
-            return 5; // SSL_INVALID;
-        }
-        return result;
-    }
-
-};
-
-IMPLEMENT_META_INTERFACE(AudioService, "android.media.IAudioService");
-
-
-static Mutex gNetworkThreadLock;
-static base::Thread *gNetworkThread = NULL;
-static scoped_refptr<SfRequestContext> gReqContext;
-static scoped_ptr<net::NetworkChangeNotifier> gNetworkChangeNotifier;
-
-bool logMessageHandler(
-        int severity,
-        const char* file,
-        int line,
-        size_t message_start,
-        const std::string& str) {
-    int androidSeverity = ANDROID_LOG_VERBOSE;
-    switch(severity) {
-    case logging::LOG_FATAL:
-        androidSeverity = ANDROID_LOG_FATAL;
-        break;
-    case logging::LOG_ERROR_REPORT:
-    case logging::LOG_ERROR:
-        androidSeverity = ANDROID_LOG_ERROR;
-        break;
-    case logging::LOG_WARNING:
-        androidSeverity = ANDROID_LOG_WARN;
-        break;
-    default:
-        androidSeverity = ANDROID_LOG_VERBOSE;
-        break;
-    }
-    android_printLog(androidSeverity, "chromium-libstagefright",
-                    "%s:%d: %s", file, line, str.c_str());
-    return false;
-}
-
-struct AutoPrioritySaver {
-    AutoPrioritySaver()
-        : mTID(androidGetTid()),
-          mPrevPriority(androidGetThreadPriority(mTID)) {
-        androidSetThreadPriority(mTID, ANDROID_PRIORITY_NORMAL);
-    }
-
-    ~AutoPrioritySaver() {
-        androidSetThreadPriority(mTID, mPrevPriority);
-    }
-
-private:
-    pid_t mTID;
-    int mPrevPriority;
-
-    DISALLOW_EVIL_CONSTRUCTORS(AutoPrioritySaver);
-};
-
-static void InitializeNetworkThreadIfNecessary() {
-    Mutex::Autolock autoLock(gNetworkThreadLock);
-
-    if (gNetworkThread == NULL) {
-        // Make sure any threads spawned by the chromium framework are
-        // running at normal priority instead of inheriting this thread's.
-        AutoPrioritySaver saver;
-
-        gNetworkThread = new base::Thread("network");
-        base::Thread::Options options;
-        options.message_loop_type = MessageLoop::TYPE_IO;
-        CHECK(gNetworkThread->StartWithOptions(options));
-
-        gReqContext = new SfRequestContext;
-
-        gNetworkChangeNotifier.reset(net::NetworkChangeNotifier::Create());
-
-        net::AndroidNetworkLibrary::RegisterSharedInstance(
-                new SfNetworkLibrary);
-        logging::SetLogMessageHandler(logMessageHandler);
-    }
-}
-
-static void MY_LOGI(const char *s) {
-    LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "%s", s);
-}
-
-static void MY_LOGV(const char *s) {
-#if !defined(LOG_NDEBUG) || LOG_NDEBUG == 0
-    LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG, "%s", s);
-#endif
-}
-
-SfNetLog::SfNetLog()
-    : mNextID(1) {
-}
-
-void SfNetLog::AddEntry(
-        EventType type,
-        const base::TimeTicks &time,
-        const Source &source,
-        EventPhase phase,
-        EventParameters *params) {
-#if 0
-    MY_LOGI(StringPrintf(
-                "AddEntry time=%s type=%s source=%s phase=%s\n",
-                TickCountToString(time).c_str(),
-                EventTypeToString(type),
-                SourceTypeToString(source.type),
-                EventPhaseToString(phase)).c_str());
-#endif
-}
-
-uint32 SfNetLog::NextID() {
-    return mNextID++;
-}
-
-net::NetLog::LogLevel SfNetLog::GetLogLevel() const {
-    return LOG_BASIC;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SfRequestContext::SfRequestContext() {
-    mUserAgent = MakeUserAgent().c_str();
-
-    set_net_log(new SfNetLog());
-
-    set_host_resolver(
-        net::CreateSystemHostResolver(
-                net::HostResolver::kDefaultParallelism,
-                NULL /* resolver_proc */,
-                net_log()));
-
-    set_ssl_config_service(
-        net::SSLConfigService::CreateSystemSSLConfigService());
-
-    mProxyConfigService = new net::ProxyConfigServiceAndroid;
-
-    set_proxy_service(net::ProxyService::CreateWithoutProxyResolver(
-        mProxyConfigService, net_log()));
-
-    set_http_transaction_factory(new net::HttpCache(
-            host_resolver(),
-            new net::CertVerifier(),
-            dnsrr_resolver(),
-            dns_cert_checker(),
-            proxy_service(),
-            ssl_config_service(),
-            net::HttpAuthHandlerFactory::CreateDefault(host_resolver()),
-            network_delegate(),
-            net_log(),
-            NULL));  // backend_factory
-
-    set_cookie_store(new net::CookieMonster(NULL, NULL));
-}
-
-const std::string &SfRequestContext::GetUserAgent(const GURL &url) const {
-    return mUserAgent;
-}
-
-status_t SfRequestContext::updateProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    Mutex::Autolock autoLock(mProxyConfigLock);
-
-    if (host == NULL || *host == '\0') {
-        MY_LOGV("updateProxyConfig NULL");
-
-        std::string proxy;
-        std::string exList;
-        mProxyConfigService->UpdateProxySettings(proxy, exList);
-    } else {
-#if !defined(LOG_NDEBUG) || LOG_NDEBUG == 0
-        LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG,
-                "updateProxyConfig %s:%d, exclude '%s'",
-                host, port, exclusionList);
-#endif
-
-        std::string proxy = StringPrintf("%s:%d", host, port).c_str();
-        std::string exList = exclusionList;
-        mProxyConfigService->UpdateProxySettings(proxy, exList);
-    }
-
-    return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SfNetworkLibrary::SfNetworkLibrary() {}
-
-SfNetworkLibrary::VerifyResult SfNetworkLibrary::VerifyX509CertChain(
-        const std::vector<std::string>& cert_chain,
-        const std::string& hostname,
-        const std::string& auth_type) {
-
-    sp<IBinder> binder =
-        defaultServiceManager()->checkService(String16("audio"));
-    if (binder == 0) {
-        ALOGW("Thread cannot connect to the audio service");
-    } else {
-        sp<IAudioService> service = interface_cast<IAudioService>(binder);
-        int code = service->verifyX509CertChain(cert_chain, hostname, auth_type);
-        ALOGV("verified: %d", code);
-        if (code == -1) {
-            return VERIFY_OK;
-        } else if (code == 2) { // SSL_IDMISMATCH
-            return VERIFY_BAD_HOSTNAME;
-        } else if (code == 3) { // SSL_UNTRUSTED
-            return VERIFY_NO_TRUSTED_ROOT;
-        }
-    }
-    return VERIFY_INVOCATION_ERROR;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SfDelegate::SfDelegate()
-    : mOwner(NULL),
-      mURLRequest(NULL),
-      mReadBuffer(new net::IOBufferWithSize(8192)),
-      mNumBytesRead(0),
-      mNumBytesTotal(0),
-      mDataDestination(NULL),
-      mAtEOS(false) {
-    InitializeNetworkThreadIfNecessary();
-}
-
-SfDelegate::~SfDelegate() {
-    CHECK(mURLRequest == NULL);
-}
-
-// static
-status_t SfDelegate::UpdateProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    InitializeNetworkThreadIfNecessary();
-
-    return gReqContext->updateProxyConfig(host, port, exclusionList);
-}
-
-void SfDelegate::setOwner(ChromiumHTTPDataSource *owner) {
-    mOwner = owner;
-}
-
-void SfDelegate::setUID(uid_t uid) {
-    gReqContext->setUID(uid);
-}
-
-bool SfDelegate::getUID(uid_t *uid) const {
-    return gReqContext->getUID(uid);
-}
-
-void SfDelegate::OnReceivedRedirect(
-            net::URLRequest *request, const GURL &new_url, bool *defer_redirect) {
-    MY_LOGV("OnReceivedRedirect");
-    mOwner->onRedirect(new_url.spec().c_str());
-}
-
-void SfDelegate::OnAuthRequired(
-            net::URLRequest *request, net::AuthChallengeInfo *auth_info) {
-    MY_LOGV("OnAuthRequired");
-
-    inherited::OnAuthRequired(request, auth_info);
-}
-
-void SfDelegate::OnCertificateRequested(
-            net::URLRequest *request, net::SSLCertRequestInfo *cert_request_info) {
-    MY_LOGV("OnCertificateRequested");
-
-    inherited::OnCertificateRequested(request, cert_request_info);
-}
-
-void SfDelegate::OnSSLCertificateError(
-            net::URLRequest *request, int cert_error, net::X509Certificate *cert) {
-    fprintf(stderr, "OnSSLCertificateError cert_error=%d\n", cert_error);
-
-    inherited::OnSSLCertificateError(request, cert_error, cert);
-}
-
-void SfDelegate::OnGetCookies(net::URLRequest *request, bool blocked_by_policy) {
-    MY_LOGV("OnGetCookies");
-}
-
-void SfDelegate::OnSetCookie(
-        net::URLRequest *request,
-        const std::string &cookie_line,
-        const net::CookieOptions &options,
-        bool blocked_by_policy) {
-    MY_LOGV("OnSetCookie");
-}
-
-void SfDelegate::OnResponseStarted(net::URLRequest *request) {
-    if (request->status().status() != net::URLRequestStatus::SUCCESS) {
-        MY_LOGI(StringPrintf(
-                    "Request failed with status %d and os_error %d",
-                    request->status().status(),
-                    request->status().os_error()).c_str());
-
-        delete mURLRequest;
-        mURLRequest = NULL;
-
-        mOwner->onConnectionFailed(ERROR_IO);
-        return;
-    } else if (mRangeRequested && request->GetResponseCode() != 206) {
-        MY_LOGI(StringPrintf(
-                    "We requested a content range, but server didn't "
-                    "support that. (responded with %d)",
-                    request->GetResponseCode()).c_str());
-
-        delete mURLRequest;
-        mURLRequest = NULL;
-
-        mOwner->onConnectionFailed(-EPIPE);
-        return;
-    } else if ((request->GetResponseCode() / 100) != 2) {
-        MY_LOGI(StringPrintf(
-                    "Server responded with http status %d",
-                    request->GetResponseCode()).c_str());
-
-        delete mURLRequest;
-        mURLRequest = NULL;
-
-        mOwner->onConnectionFailed(ERROR_IO);
-        return;
-    }
-
-    MY_LOGV("OnResponseStarted");
-
-    std::string headers;
-    request->GetAllResponseHeaders(&headers);
-
-    MY_LOGV(StringPrintf("response headers: %s", headers.c_str()).c_str());
-
-    std::string contentType;
-    request->GetResponseHeaderByName("Content-Type", &contentType);
-
-    mOwner->onConnectionEstablished(
-            request->GetExpectedContentSize(), contentType.c_str());
-}
-
-void SfDelegate::OnReadCompleted(net::URLRequest *request, int bytes_read) {
-    if (bytes_read == -1) {
-        MY_LOGI(StringPrintf(
-                    "OnReadCompleted, read failed, status %d",
-                    request->status().status()).c_str());
-
-        mOwner->onReadCompleted(ERROR_IO);
-        return;
-    }
-
-    MY_LOGV(StringPrintf("OnReadCompleted, read %d bytes", bytes_read).c_str());
-
-    if (bytes_read < 0) {
-        MY_LOGI(StringPrintf(
-                    "Read failed w/ status %d\n",
-                    request->status().status()).c_str());
-
-        mOwner->onReadCompleted(ERROR_IO);
-        return;
-    } else if (bytes_read == 0) {
-        mAtEOS = true;
-        mOwner->onReadCompleted(mNumBytesRead);
-        return;
-    }
-
-    CHECK_GT(bytes_read, 0);
-    CHECK_LE(mNumBytesRead + bytes_read, mNumBytesTotal);
-
-    memcpy((uint8_t *)mDataDestination + mNumBytesRead,
-           mReadBuffer->data(),
-           bytes_read);
-
-    mNumBytesRead += bytes_read;
-
-    readMore(request);
-}
-
-void SfDelegate::readMore(net::URLRequest *request) {
-    while (mNumBytesRead < mNumBytesTotal) {
-        size_t copy = mNumBytesTotal - mNumBytesRead;
-        if (copy > mReadBuffer->size()) {
-            copy = mReadBuffer->size();
-        }
-
-        int n;
-        if (request->Read(mReadBuffer, copy, &n)) {
-            MY_LOGV(StringPrintf("Read %d bytes directly.", n).c_str());
-
-            CHECK_LE((size_t)n, copy);
-
-            memcpy((uint8_t *)mDataDestination + mNumBytesRead,
-                   mReadBuffer->data(),
-                   n);
-
-            mNumBytesRead += n;
-
-            if (n == 0) {
-                mAtEOS = true;
-                break;
-            }
-        } else {
-            MY_LOGV("readMore pending read");
-
-            if (request->status().status() != net::URLRequestStatus::IO_PENDING) {
-                MY_LOGI(StringPrintf(
-                            "Direct read failed w/ status %d\n",
-                            request->status().status()).c_str());
-
-                mOwner->onReadCompleted(ERROR_IO);
-                return;
-            }
-
-            return;
-        }
-    }
-
-    mOwner->onReadCompleted(mNumBytesRead);
-}
-
-void SfDelegate::initiateConnection(
-        const char *uri,
-        const KeyedVector<String8, String8> *headers,
-        off64_t offset) {
-    GURL url(uri);
-
-    MessageLoop *loop = gNetworkThread->message_loop();
-    loop->PostTask(
-            FROM_HERE,
-            NewRunnableFunction(
-                &SfDelegate::OnInitiateConnectionWrapper,
-                this,
-                url,
-                headers,
-                offset));
-
-}
-
-// static
-void SfDelegate::OnInitiateConnectionWrapper(
-        SfDelegate *me, GURL url,
-        const KeyedVector<String8, String8> *headers,
-        off64_t offset) {
-    me->onInitiateConnection(url, headers, offset);
-}
-
-void SfDelegate::onInitiateConnection(
-        const GURL &url,
-        const KeyedVector<String8, String8> *extra,
-        off64_t offset) {
-    CHECK(mURLRequest == NULL);
-
-    mURLRequest = new net::URLRequest(url, this);
-    mAtEOS = false;
-
-    mRangeRequested = false;
-
-    if (offset != 0 || extra != NULL) {
-        net::HttpRequestHeaders headers =
-            mURLRequest->extra_request_headers();
-
-        if (offset != 0) {
-            headers.AddHeaderFromString(
-                    StringPrintf("Range: bytes=%lld-", offset).c_str());
-
-            mRangeRequested = true;
-        }
-
-        if (extra != NULL) {
-            for (size_t i = 0; i < extra->size(); ++i) {
-                AString s;
-                s.append(extra->keyAt(i).string());
-                s.append(": ");
-                s.append(extra->valueAt(i).string());
-
-                headers.AddHeaderFromString(s.c_str());
-            }
-        }
-
-        mURLRequest->SetExtraRequestHeaders(headers);
-    }
-
-    mURLRequest->set_context(gReqContext);
-
-    mURLRequest->Start();
-}
-
-void SfDelegate::initiateDisconnect() {
-    MessageLoop *loop = gNetworkThread->message_loop();
-    loop->PostTask(
-            FROM_HERE,
-            NewRunnableFunction(
-                &SfDelegate::OnInitiateDisconnectWrapper, this));
-}
-
-// static
-void SfDelegate::OnInitiateDisconnectWrapper(SfDelegate *me) {
-    me->onInitiateDisconnect();
-}
-
-void SfDelegate::onInitiateDisconnect() {
-    if (mURLRequest == NULL) {
-        return;
-    }
-
-    mURLRequest->Cancel();
-
-    delete mURLRequest;
-    mURLRequest = NULL;
-
-    mOwner->onDisconnectComplete();
-}
-
-void SfDelegate::initiateRead(void *data, size_t size) {
-    MessageLoop *loop = gNetworkThread->message_loop();
-    loop->PostTask(
-            FROM_HERE,
-            NewRunnableFunction(
-                &SfDelegate::OnInitiateReadWrapper, this, data, size));
-}
-
-// static
-void SfDelegate::OnInitiateReadWrapper(
-        SfDelegate *me, void *data, size_t size) {
-    me->onInitiateRead(data, size);
-}
-
-void SfDelegate::onInitiateRead(void *data, size_t size) {
-    CHECK(mURLRequest != NULL);
-
-    mNumBytesRead = 0;
-    mNumBytesTotal = size;
-    mDataDestination = data;
-
-    if (mAtEOS) {
-        mOwner->onReadCompleted(0);
-        return;
-    }
-
-    readMore(mURLRequest);
-}
-
-}  // namespace android
-
diff --git a/media/libstagefright/chromium_http/support.h b/media/libstagefright/chromium_http/support.h
deleted file mode 100644
index 975a1d3..0000000
--- a/media/libstagefright/chromium_http/support.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SUPPORT_H_
-
-#define SUPPORT_H_
-
-#include <assert.h>
-
-#include "net/base/net_log.h"
-#include "net/url_request/url_request.h"
-#include "net/url_request/url_request_context.h"
-#include "net/base/android_network_library.h"
-#include "net/base/io_buffer.h"
-
-#include <utils/KeyedVector.h>
-#include <utils/Mutex.h>
-#include <utils/String8.h>
-
-namespace net {
-    struct ProxyConfigServiceAndroid;
-};
-
-namespace android {
-
-struct SfNetLog : public net::NetLog {
-    SfNetLog();
-
-    virtual void AddEntry(
-            EventType type,
-            const base::TimeTicks &time,
-            const Source &source,
-            EventPhase phase,
-            EventParameters *params);
-
-    virtual uint32 NextID();
-    virtual LogLevel GetLogLevel() const;
-
-private:
-    uint32 mNextID;
-
-    DISALLOW_EVIL_CONSTRUCTORS(SfNetLog);
-};
-
-struct SfRequestContext : public net::URLRequestContext {
-    SfRequestContext();
-
-    virtual const std::string &GetUserAgent(const GURL &url) const;
-
-    status_t updateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList);
-
-private:
-    Mutex mProxyConfigLock;
-
-    std::string mUserAgent;
-    net::ProxyConfigServiceAndroid *mProxyConfigService;
-
-    DISALLOW_EVIL_CONSTRUCTORS(SfRequestContext);
-};
-
-// This is required for https support, we don't really verify certificates,
-// we accept anything...
-struct SfNetworkLibrary : public net::AndroidNetworkLibrary {
-    SfNetworkLibrary();
-
-    virtual VerifyResult VerifyX509CertChain(
-            const std::vector<std::string>& cert_chain,
-            const std::string& hostname,
-            const std::string& auth_type);
-
-private:
-    DISALLOW_EVIL_CONSTRUCTORS(SfNetworkLibrary);
-};
-
-struct ChromiumHTTPDataSource;
-
-struct SfDelegate : public net::URLRequest::Delegate {
-    SfDelegate();
-    virtual ~SfDelegate();
-
-    void initiateConnection(
-            const char *uri,
-            const KeyedVector<String8, String8> *headers,
-            off64_t offset);
-
-    void initiateDisconnect();
-    void initiateRead(void *data, size_t size);
-
-    void setOwner(ChromiumHTTPDataSource *mOwner);
-
-    // Gets the UID of the calling process
-    bool getUID(uid_t *uid) const;
-
-    void setUID(uid_t uid);
-
-    virtual void OnReceivedRedirect(
-            net::URLRequest *request, const GURL &new_url, bool *defer_redirect);
-
-    virtual void OnAuthRequired(
-            net::URLRequest *request, net::AuthChallengeInfo *auth_info);
-
-    virtual void OnCertificateRequested(
-            net::URLRequest *request, net::SSLCertRequestInfo *cert_request_info);
-
-    virtual void OnSSLCertificateError(
-            net::URLRequest *request, int cert_error, net::X509Certificate *cert);
-
-    virtual void OnGetCookies(net::URLRequest *request, bool blocked_by_policy);
-
-    virtual void OnSetCookie(
-            net::URLRequest *request,
-            const std::string &cookie_line,
-            const net::CookieOptions &options,
-            bool blocked_by_policy);
-
-    virtual void OnResponseStarted(net::URLRequest *request);
-
-    virtual void OnReadCompleted(net::URLRequest *request, int bytes_read);
-
-    static status_t UpdateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList);
-
-private:
-    typedef Delegate inherited;
-
-    ChromiumHTTPDataSource *mOwner;
-
-    net::URLRequest *mURLRequest;
-    scoped_refptr<net::IOBufferWithSize> mReadBuffer;
-
-    size_t mNumBytesRead;
-    size_t mNumBytesTotal;
-    void *mDataDestination;
-
-    bool mRangeRequested;
-    bool mAtEOS;
-
-    void readMore(net::URLRequest *request);
-
-    static void OnInitiateConnectionWrapper(
-            SfDelegate *me,
-            GURL url,
-            const KeyedVector<String8, String8> *headers,
-            off64_t offset);
-
-    static void OnInitiateDisconnectWrapper(SfDelegate *me);
-
-    static void OnInitiateReadWrapper(
-            SfDelegate *me, void *data, size_t size);
-
-    void onInitiateConnection(
-            const GURL &url,
-            const KeyedVector<String8, String8> *headers,
-            off64_t offset);
-
-    void onInitiateDisconnect();
-    void onInitiateRead(void *data, size_t size);
-
-    DISALLOW_EVIL_CONSTRUCTORS(SfDelegate);
-};
-
-}  // namespace android
-
-#endif  // SUPPORT_H_
diff --git a/media/libstagefright/chromium_http_stub.cpp b/media/libstagefright/chromium_http_stub.cpp
deleted file mode 100644
index ed8a878..0000000
--- a/media/libstagefright/chromium_http_stub.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dlfcn.h>
-
-#include <media/stagefright/DataSource.h>
-
-#include "include/chromium_http_stub.h"
-#include "include/HTTPBase.h"
-
-namespace android {
-
-static bool gFirst = true;
-static void *gHandle;
-static Mutex gLibMutex;
-
-HTTPBase *(*gLib_createChromiumHTTPDataSource)(uint32_t flags);
-DataSource *(*gLib_createDataUriSource)(const char *uri);
-
-status_t (*gLib_UpdateChromiumHTTPDataSourceProxyConfig)(
-        const char *host, int32_t port, const char *exclusionList);
-
-static bool load_libstagefright_chromium_http() {
-    Mutex::Autolock autoLock(gLibMutex);
-    void *sym;
-
-    if (!gFirst) {
-        return (gHandle != NULL);
-    }
-
-    gFirst = false;
-
-    gHandle = dlopen("libstagefright_chromium_http.so", RTLD_NOW);
-    if (gHandle == NULL) {
-        return false;
-    }
-
-    sym = dlsym(gHandle, "createChromiumHTTPDataSource");
-    if (sym == NULL) {
-        gHandle = NULL;
-        return false;
-    }
-    gLib_createChromiumHTTPDataSource = (HTTPBase *(*)(uint32_t))sym;
-
-    sym = dlsym(gHandle, "createDataUriSource");
-    if (sym == NULL) {
-        gHandle = NULL;
-        return false;
-    }
-    gLib_createDataUriSource = (DataSource *(*)(const char *))sym;
-
-    sym = dlsym(gHandle, "UpdateChromiumHTTPDataSourceProxyConfig");
-    if (sym == NULL) {
-        gHandle = NULL;
-        return false;
-    }
-    gLib_UpdateChromiumHTTPDataSourceProxyConfig =
-        (status_t (*)(const char *, int32_t, const char *))sym;
-
-    return true;
-}
-
-HTTPBase *createChromiumHTTPDataSource(uint32_t flags) {
-    if (!load_libstagefright_chromium_http()) {
-        return NULL;
-    }
-
-    return gLib_createChromiumHTTPDataSource(flags);
-}
-
-status_t UpdateChromiumHTTPDataSourceProxyConfig(
-        const char *host, int32_t port, const char *exclusionList) {
-    if (!load_libstagefright_chromium_http()) {
-        return INVALID_OPERATION;
-    }
-
-    return gLib_UpdateChromiumHTTPDataSourceProxyConfig(
-            host, port, exclusionList);
-}
-
-DataSource *createDataUriSource(const char *uri) {
-    if (!load_libstagefright_chromium_http()) {
-        return NULL;
-    }
-
-    return gLib_createDataUriSource(uri);
-}
-
-}
diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk
index ffa64f9..49ff238 100644
--- a/media/libstagefright/codecs/aacdec/Android.mk
+++ b/media/libstagefright/codecs/aacdec/Android.mk
@@ -17,6 +17,8 @@
 
 LOCAL_CFLAGS :=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index d4b0de7..532e36f 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -30,7 +30,7 @@
 #define DRC_DEFAULT_MOBILE_REF_LEVEL 64  /* 64*-0.25dB = -16 dB below full scale for mobile conf */
 #define DRC_DEFAULT_MOBILE_DRC_CUT   127 /* maximum compression of dynamic range for mobile conf */
 #define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
-#define MAX_CHANNEL_COUNT            6  /* maximum number of audio channels that can be decoded */
+#define MAX_CHANNEL_COUNT            8  /* maximum number of audio channels that can be decoded */
 // names of properties that can be used to override the default DRC settings
 #define PROP_DRC_OVERRIDE_REF_LEVEL  "aac_drc_reference_level"
 #define PROP_DRC_OVERRIDE_CUT        "aac_drc_cut"
@@ -296,8 +296,11 @@
         if (!(property_get("media.aac_51_output_enabled", value, NULL) &&
                 (!strcmp(value, "1") || !strcasecmp(value, "true")))) {
             ALOGI("Downmixing multichannel AAC to stereo");
-            aacDecoder_SetParam(mAACDecoder, AAC_PCM_OUTPUT_CHANNELS, 2);
+            aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
             mStreamInfo->numChannels = 2;
+            // By default, the decoder creates a 5.1 channel downmix signal
+            // for seven and eight channel input streams. To enable 6.1 and 7.1 channel output
+            // use aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1)
         }
     }
 }
@@ -374,7 +377,7 @@
                 mNumSamplesOutput = 0;
             }
 
-            if (mIsADTS) {
+            if (mIsADTS && inHeader->nFilledLen) {
                 size_t adtsHeaderSize = 0;
                 // skip 30 bits, aac_frame_length follows.
                 // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 057c69b..58ec3ba 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -82,6 +82,8 @@
 LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
 endif
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_STATIC_LIBRARY)
 
 ################################################################################
@@ -106,6 +108,8 @@
 
   LOCAL_CFLAGS :=
 
+  LOCAL_CFLAGS += -Werror
+
   LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
 
   LOCAL_SHARED_LIBRARIES := \
@@ -128,6 +132,8 @@
 
   LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
+  LOCAL_CFLAGS += -Werror
+
   LOCAL_STATIC_LIBRARIES := \
           libstagefright_aacenc
 
diff --git a/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c b/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c
index cc01927..1d029fc 100644
--- a/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c
+++ b/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c
@@ -24,6 +24,8 @@
 #include "basic_op.h"
 #include "oper_32b.h"
 
+#define UNUSED(x) (void)(x)
+
 /*****************************************************************************
  *                                                                           *
  *  Function L_Extract()                                                     *
@@ -243,6 +245,8 @@
 Word32 rsqrt(Word32 value,     /*!< Operand to square root (0.0 ... 1) */
              Word32 accuracy)  /*!< Number of valid bits that will be calculated */
 {
+    UNUSED(accuracy);
+
     Word32 root = 0;
 	Word32 scale;
 
diff --git a/media/libstagefright/codecs/aacenc/src/aacenc.c b/media/libstagefright/codecs/aacenc/src/aacenc.c
index d1c8621..40db92c 100644
--- a/media/libstagefright/codecs/aacenc/src/aacenc.c
+++ b/media/libstagefright/codecs/aacenc/src/aacenc.c
@@ -27,6 +27,8 @@
 #include "cmnMemory.h"
 #include "memalign.h"
 
+#define UNUSED(x) (void)(x)
+
 /**
 * Init the audio codec module and return codec handle
 * \param phCodec [OUT] Return the video codec handle
@@ -46,6 +48,8 @@
 	VO_MEM_OPERATOR *pMemOP;
 	int interMem;
 
+        UNUSED(vType);
+
 	interMem = 0;
 	error = 0;
 
@@ -471,6 +475,10 @@
 */
 VO_U32 VO_API voAACEncGetParam(VO_HANDLE hCodec, VO_S32 uParamID, VO_PTR pData)
 {
+        UNUSED(hCodec);
+        UNUSED(uParamID);
+        UNUSED(pData);
+
 	return VO_ERR_NONE;
 }
 
diff --git a/media/libstagefright/codecs/aacenc/src/bitenc.c b/media/libstagefright/codecs/aacenc/src/bitenc.c
index fcc12dd..d1fd647 100644
--- a/media/libstagefright/codecs/aacenc/src/bitenc.c
+++ b/media/libstagefright/codecs/aacenc/src/bitenc.c
@@ -26,6 +26,7 @@
 #include "qc_data.h"
 #include "interface.h"
 
+#define UNUSED(x) (void)(x)
 
 static const  Word16 globalGainOffset = 100;
 static const  Word16 icsReservedBit   = 0;
@@ -585,6 +586,8 @@
   Word16 elementUsedBits;
   Word16 frameBits=0;
 
+  UNUSED(ancBytes);
+
   /*   struct bitbuffer bsWriteCopy; */
   bitMarkUp = GetBitsAvail(hBitStream);
   if(qcOut->qcElement.adtsUsed)  /*  write adts header*/
diff --git a/media/libstagefright/codecs/aacenc/src/psy_main.c b/media/libstagefright/codecs/aacenc/src/psy_main.c
index 4e9218c..6f0679c 100644
--- a/media/libstagefright/codecs/aacenc/src/psy_main.c
+++ b/media/libstagefright/codecs/aacenc/src/psy_main.c
@@ -38,6 +38,8 @@
 #include "tns_func.h"
 #include "memalign.h"
 
+#define UNUSED(x) (void)(x)
+
 /*                                    long       start       short       stop */
 static Word16 blockType2windowShape[] = {KBD_WINDOW,SINE_WINDOW,SINE_WINDOW,KBD_WINDOW};
 
@@ -170,7 +172,9 @@
 *****************************************************************************/
 Word16 PsyOutDelete(PSY_OUT *hPsyOut, VO_MEM_OPERATOR *pMemOP)
 {
-  hPsyOut=NULL;
+  UNUSED(hPsyOut);
+  UNUSED(pMemOP);
+
   return 0;
 }
 
diff --git a/media/libstagefright/codecs/aacenc/src/qc_main.c b/media/libstagefright/codecs/aacenc/src/qc_main.c
index 48ff300..e5d78aa 100644
--- a/media/libstagefright/codecs/aacenc/src/qc_main.c
+++ b/media/libstagefright/codecs/aacenc/src/qc_main.c
@@ -33,6 +33,7 @@
 #include "channel_map.h"
 #include "memalign.h"
 
+#define UNUSED(x) (void)(x)
 
 typedef enum{
   FRAME_LEN_BYTES_MODULO =  1,
@@ -204,11 +205,8 @@
 **********************************************************************************/
 void QCDelete(QC_STATE *hQC, VO_MEM_OPERATOR *pMemOP)
 {
-
-  /*
-     nothing to do
-  */
-  hQC=NULL;
+  UNUSED(hQC);
+  UNUSED(pMemOP);
 }
 
 /*********************************************************************************
diff --git a/media/libstagefright/codecs/aacenc/src/tns.c b/media/libstagefright/codecs/aacenc/src/tns.c
index 455a864..5172612 100644
--- a/media/libstagefright/codecs/aacenc/src/tns.c
+++ b/media/libstagefright/codecs/aacenc/src/tns.c
@@ -30,6 +30,8 @@
 #include "psy_configuration.h"
 #include "tns_func.h"
 
+#define UNUSED(x) (void)(x)
+
 #define TNS_MODIFY_BEGIN         2600  /* Hz */
 #define RATIO_PATCH_LOWER_BORDER 380   /* Hz */
 #define TNS_GAIN_THRESH			 141   /* 1.41*100 */
@@ -643,6 +645,8 @@
   Word32 i;
   Word32 tnsOrderPlus1 = tnsOrder + 1;
 
+  UNUSED(window);
+
   assert(tnsOrder <= TNS_MAX_ORDER);      /* remove asserts later? (btg) */
 
   for(i=0;i<tnsOrder;i++) {
diff --git a/media/libstagefright/codecs/amrnb/common/Android.mk b/media/libstagefright/codecs/amrnb/common/Android.mk
index 30ce29c..a2b3c8f 100644
--- a/media/libstagefright/codecs/amrnb/common/Android.mk
+++ b/media/libstagefright/codecs/amrnb/common/Android.mk
@@ -69,6 +69,8 @@
 LOCAL_CFLAGS := \
         -DOSCL_UNUSED_ARG= -DOSCL_IMPORT_REF= -DOSCL_EXPORT_REF=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := libstagefright_amrnb_common
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 8d6c6f8..b067456 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -47,6 +47,8 @@
 LOCAL_CFLAGS := \
         -DOSCL_UNUSED_ARG= -DOSCL_IMPORT_REF=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := libstagefright_amrnbdec
 
 include $(BUILD_STATIC_LIBRARY)
@@ -68,6 +70,8 @@
 
 LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrnbdec libstagefright_amrwbdec
 
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk
index f4e467a..afc0b89 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.mk
+++ b/media/libstagefright/codecs/amrnb/enc/Android.mk
@@ -69,6 +69,8 @@
 LOCAL_CFLAGS := \
         -DOSCL_UNUSED_ARG=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := libstagefright_amrnbenc
 
 include $(BUILD_STATIC_LIBRARY)
@@ -88,6 +90,8 @@
         $(LOCAL_PATH)/../common/include \
         $(LOCAL_PATH)/../common
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrnbenc
 
diff --git a/media/libstagefright/codecs/amrwb/Android.mk b/media/libstagefright/codecs/amrwb/Android.mk
index 677107f..efdf988 100644
--- a/media/libstagefright/codecs/amrwb/Android.mk
+++ b/media/libstagefright/codecs/amrwb/Android.mk
@@ -50,6 +50,8 @@
 LOCAL_CFLAGS := \
         -DOSCL_UNUSED_ARG= -DOSCL_IMPORT_REF=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := libstagefright_amrwbdec
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index c5b8e0c..64fe8d1 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -112,6 +112,8 @@
 LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
 endif
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_STATIC_LIBRARY)
 
 ################################################################################
@@ -126,6 +128,8 @@
 	frameworks/av/media/libstagefright/codecs/common/include \
 	frameworks/native/include/media/openmax
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrwbenc
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/autocorr.c b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
index 8c477ca..0b2ea89 100644
--- a/media/libstagefright/codecs/amrwbenc/src/autocorr.c
+++ b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
@@ -28,6 +28,8 @@
 #include "acelp.h"
 #include "ham_wind.tab"
 
+#define UNUSED(x) (void)(x)
+
 void Autocorr(
 		Word16 x[],                           /* (i)    : Input signal                      */
 		Word16 m,                             /* (i)    : LPC order                         */
@@ -40,6 +42,8 @@
 	Word32 L_sum, L_sum1, L_tmp, F_LEN;
 	Word16 *p1,*p2,*p3;
 	const Word16 *p4;
+        UNUSED(m);
+
 	/* Windowing of signal */
 	p1 = x;
 	p4 = vo_window;
diff --git a/media/libstagefright/codecs/amrwbenc/src/convolve.c b/media/libstagefright/codecs/amrwbenc/src/convolve.c
index acba532..4c1f7d4 100644
--- a/media/libstagefright/codecs/amrwbenc/src/convolve.c
+++ b/media/libstagefright/codecs/amrwbenc/src/convolve.c
@@ -25,6 +25,8 @@
 #include "typedef.h"
 #include "basic_op.h"
 
+#define UNUSED(x) (void)(x)
+
 void Convolve (
 		Word16 x[],        /* (i)     : input vector                           */
 		Word16 h[],        /* (i)     : impulse response                       */
@@ -35,6 +37,8 @@
 	Word32  i, n;
 	Word16 *tmpH,*tmpX;
 	Word32 s;
+        UNUSED(L);
+
 	for (n = 0; n < 64;)
 	{
 		tmpH = h+n;
diff --git a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
index 0d66c31..b66b55e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
@@ -31,6 +31,8 @@
 #define UP_SAMP      4
 #define L_INTERPOL1  4
 
+#define UNUSED(x) (void)(x)
+
 /* Local functions */
 
 #ifdef ASM_OPT
@@ -171,6 +173,7 @@
 	Word32 corr, exp_corr, norm, exp, scale;
 	Word16 exp_norm, excf[L_SUBFR], tmp;
 	Word32 L_tmp, L_tmp1, L_tmp2;
+        UNUSED(L_subfr);
 
 	/* compute the filtered excitation for the first delay t_min */
 	k = -t_min;
diff --git a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
index 1bda05a..961aadc 100644
--- a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
+++ b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
@@ -26,6 +26,8 @@
 #include "math_op.h"
 #include "cnst.h"
 
+#define UNUSED(x) (void)(x)
+
 void Syn_filt(
 		Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
 		Word16 x[],                           /* (i)     : input signal                             */
@@ -95,6 +97,8 @@
 	Word32 i,a0;
 	Word32 L_tmp, L_tmp1;
 	Word16 *p1, *p2, *p3;
+        UNUSED(m);
+
 	a0 = a[0] >> (4 + Qnew);          /* input / 16 and >>Qnew */
 	/* Do the filtering. */
 	for (i = 0; i < lg; i++)
diff --git a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
index ea9da52..df7b9b3 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
@@ -39,6 +39,8 @@
 #include "mem_align.h"
 #include "cmnMemory.h"
 
+#define UNUSED(x) (void)(x)
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -1602,6 +1604,8 @@
 	VO_MEM_OPERATOR voMemoprator;
 #endif
 	VO_MEM_OPERATOR *pMemOP;
+        UNUSED(vType);
+
 	int interMem = 0;
 
 	if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
diff --git a/media/libstagefright/codecs/avc/common/Android.mk b/media/libstagefright/codecs/avc/common/Android.mk
index 22dee15..844ef0a 100644
--- a/media/libstagefright/codecs/avc/common/Android.mk
+++ b/media/libstagefright/codecs/avc/common/Android.mk
@@ -16,4 +16,6 @@
 	$(LOCAL_PATH)/src \
  	$(LOCAL_PATH)/include
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index 7d17c2a..537ba42 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -30,6 +30,8 @@
 LOCAL_CFLAGS := \
     -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_STATIC_LIBRARY)
 
 ################################################################################
@@ -69,4 +71,6 @@
 LOCAL_MODULE := libstagefright_soft_h264enc
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
index 89f0fed..0f4a00d 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
@@ -34,6 +34,12 @@
 
 #include "SoftAVCEncoder.h"
 
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
 namespace android {
 
 template<class T>
@@ -136,14 +142,14 @@
 }
 
 static void* MallocWrapper(
-        void *userData, int32_t size, int32_t attrs) {
+        void * /* userData */, int32_t size, int32_t /* attrs */) {
     void *ptr = malloc(size);
     if (ptr)
         memset(ptr, 0, size);
     return ptr;
 }
 
-static void FreeWrapper(void *userData, void* ptr) {
+static void FreeWrapper(void * /* userData */, void* ptr) {
     free(ptr);
 }
 
@@ -722,7 +728,7 @@
     }
 }
 
-void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
+void SoftAVCEncoder::onQueueFilled(OMX_U32 /* portIndex */) {
     if (mSignalledError || mSawInputEOS) {
         return;
     }
@@ -795,7 +801,7 @@
             }
         }
 
-        buffer_handle_t srcBuffer; // for MetaDataMode only
+        buffer_handle_t srcBuffer = NULL; // for MetaDataMode only
 
         // Get next input video frame
         if (mReadyForNextFrame) {
@@ -964,6 +970,7 @@
 }
 
 void SoftAVCEncoder::signalBufferReturned(MediaBuffer *buffer) {
+    UNUSED_UNLESS_VERBOSE(buffer);
     ALOGV("signalBufferReturned: %p", buffer);
 }
 
diff --git a/media/libstagefright/codecs/common/Android.mk b/media/libstagefright/codecs/common/Android.mk
index a33cb92..b0010ff 100644
--- a/media/libstagefright/codecs/common/Android.mk
+++ b/media/libstagefright/codecs/common/Android.mk
@@ -14,6 +14,8 @@
 LOCAL_C_INCLUDES := \
 	$(LOCAL_PATH)/include
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
 
 
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index f01d605..59a11de 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -9,6 +9,8 @@
         frameworks/native/include/media/openmax \
         external/flac/include
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog
 
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index d797197..1301060 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -27,6 +27,12 @@
 #define FLAC_COMPRESSION_LEVEL_DEFAULT 5
 #define FLAC_COMPRESSION_LEVEL_MAX     8
 
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
 namespace android {
 
 template<class T>
@@ -257,7 +263,7 @@
 }
 
 void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) {
-    //UNUSED_UNLESS_VERBOSE(portIndex);
+    UNUSED_UNLESS_VERBOSE(portIndex);
     ALOGV("SoftFlacEncoder::onQueueFilled(portIndex=%d)", portIndex);
 
     if (mSignalledError) {
@@ -343,16 +349,17 @@
     }
 }
 
-
 FLAC__StreamEncoderWriteStatus SoftFlacEncoder::onEncodedFlacAvailable(
             const FLAC__byte buffer[],
-            size_t bytes, unsigned samples, unsigned current_frame) {
-    ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%d, samples=%d, curr_frame=%d)",
+            size_t bytes, unsigned samples,
+            unsigned current_frame) {
+    UNUSED_UNLESS_VERBOSE(current_frame);
+    ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%zu, samples=%u, curr_frame=%u)",
             bytes, samples, current_frame);
 
 #ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
     if (samples == 0) {
-        ALOGI(" saving %d bytes of header", bytes);
+        ALOGI(" saving %zu bytes of header", bytes);
         memcpy(mHeader + mHeaderOffset, buffer, bytes);
         mHeaderOffset += bytes;// will contain header size when finished receiving header
         return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
@@ -444,8 +451,12 @@
 
 // static
 FLAC__StreamEncoderWriteStatus SoftFlacEncoder::flacEncoderWriteCallback(
-            const FLAC__StreamEncoder *encoder, const FLAC__byte buffer[],
-            size_t bytes, unsigned samples, unsigned current_frame, void *client_data) {
+            const FLAC__StreamEncoder * /* encoder */,
+            const FLAC__byte buffer[],
+            size_t bytes,
+            unsigned samples,
+            unsigned current_frame,
+            void *client_data) {
     return ((SoftFlacEncoder*) client_data)->onEncodedFlacAvailable(
             buffer, bytes, samples, current_frame);
 }
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index 4c80da6..a0112e1 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -14,4 +14,6 @@
 LOCAL_MODULE := libstagefright_soft_g711dec
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index 71613d2..30868d5 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -9,6 +9,8 @@
         frameworks/native/include/media/openmax \
         external/libgsm/inc
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog
 
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index a3d5779..1d232c6 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -46,6 +46,8 @@
 
 LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_STATIC_LIBRARY)
 
 ################################################################################
@@ -72,4 +74,6 @@
 LOCAL_MODULE := libstagefright_soft_mpeg4dec
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 83a2dd2..c9006d9 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -33,6 +33,8 @@
     $(TOP)/frameworks/av/media/libstagefright/include \
     $(TOP)/frameworks/native/include/media/openmax
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_STATIC_LIBRARY)
 
 ################################################################################
@@ -72,4 +74,6 @@
 LOCAL_MODULE := libstagefright_soft_mpeg4enc
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index da5b785..e25709d 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -679,7 +679,7 @@
             mSawInputEOS = true;
         }
 
-        buffer_handle_t srcBuffer; // for MetaDataMode only
+        buffer_handle_t srcBuffer = NULL; // for MetaDataMode only
         if (inHeader->nFilledLen > 0) {
             uint8_t *inputData = NULL;
             if (mStoreMetaDataInBuffers) {
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 135c715..8284490 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -50,6 +50,8 @@
 LOCAL_CFLAGS := \
         -DOSCL_UNUSED_ARG=
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := libstagefright_mp3dec
 
 LOCAL_ARM_MODE := arm
@@ -69,6 +71,8 @@
         $(LOCAL_PATH)/src \
         $(LOCAL_PATH)/include
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 4d864df..5396022 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -146,6 +146,23 @@
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamAudioMp3:
+        {
+            OMX_AUDIO_PARAM_MP3TYPE *mp3Params =
+                (OMX_AUDIO_PARAM_MP3TYPE *)params;
+
+            if (mp3Params->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            mp3Params->nChannels = mNumChannels;
+            mp3Params->nBitRate = 0 /* unknown */;
+            mp3Params->nSampleRate = mSamplingRate;
+            // other fields are encoder-only
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalGetParameter(index, params);
     }
@@ -335,6 +352,9 @@
         // depend on fragments from the last one decoded.
         pvmp3_InitDecoder(mConfig, mDecoderBuf);
         mIsFirst = true;
+        mSignalledError = false;
+        mSawInputEos = false;
+        mSignalledOutputEos = false;
     }
 }
 
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 7f2c46d..93ff64c 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -20,4 +20,6 @@
 LOCAL_MODULE := libstagefright_soft_vpxdec
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5efe022..dc38ea8 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -27,7 +27,6 @@
 
 namespace android {
 
-
 template<class T>
 static void InitOMXParams(T *params) {
     params->nSize = sizeof(T);
@@ -141,17 +140,27 @@
       mWidth(176),
       mHeight(144),
       mBitrate(192000),  // in bps
+      mFramerate(30 << 16), // in Q16 format
       mBitrateUpdated(false),
       mBitrateControlMode(VPX_VBR),  // variable bitrate
-      mFrameDurationUs(33333),  // Defaults to 30 fps
       mDCTPartitions(0),
       mErrorResilience(OMX_FALSE),
       mColorFormat(OMX_COLOR_FormatYUV420Planar),
       mLevel(OMX_VIDEO_VP8Level_Version0),
+      mKeyFrameInterval(0),
+      mMinQuantizer(0),
+      mMaxQuantizer(0),
+      mTemporalLayers(0),
+      mTemporalPatternType(OMX_VIDEO_VPXTemporalLayerPatternNone),
+      mTemporalPatternLength(0),
+      mTemporalPatternIdx(0),
+      mLastTimestamp(0x7FFFFFFFFFFFFFFFLL),
       mConversionBuffer(NULL),
       mInputDataIsMeta(false),
       mGrallocModule(NULL),
       mKeyFrameRequested(false) {
+    memset(mTemporalLayerBitrateRatio, 0, sizeof(mTemporalLayerBitrateRatio));
+    mTemporalLayerBitrateRatio[0] = 100;
     initPorts();
 }
 
@@ -180,9 +189,8 @@
     inputPort.format.video.nStride = inputPort.format.video.nFrameWidth;
     inputPort.format.video.nSliceHeight = inputPort.format.video.nFrameHeight;
     inputPort.format.video.nBitrate = 0;
-    // frameRate is reciprocal of frameDuration, which is
-    // in microseconds. It is also in Q16 format.
-    inputPort.format.video.xFramerate = (1000000/mFrameDurationUs) << 16;
+    // frameRate is in Q16 format.
+    inputPort.format.video.xFramerate = mFramerate;
     inputPort.format.video.bFlagErrorConcealment = OMX_FALSE;
     inputPort.nPortIndex = kInputPortIndex;
     inputPort.eDir = OMX_DirInput;
@@ -220,7 +228,7 @@
     outputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingVP8;
     outputPort.format.video.eColorFormat = OMX_COLOR_FormatUnused;
     outputPort.format.video.pNativeWindow = NULL;
-    outputPort.nBufferSize = 256 * 1024;  // arbitrary
+    outputPort.nBufferSize = 1024 * 1024; // arbitrary
 
     addPort(outputPort);
 }
@@ -236,7 +244,9 @@
     if (mCodecInterface == NULL) {
         return UNKNOWN_ERROR;
     }
-
+    ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
+          (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+          mMinQuantizer, mMaxQuantizer);
     codec_return = vpx_codec_enc_config_default(mCodecInterface,
                                                 mCodecConfiguration,
                                                 0);  // Codec specific flags
@@ -277,8 +287,120 @@
     mCodecConfiguration->g_timebase.num = 1;
     mCodecConfiguration->g_timebase.den = 1000000;
     // rc_target_bitrate is in kbps, mBitrate in bps
-    mCodecConfiguration->rc_target_bitrate = mBitrate/1000;
+    mCodecConfiguration->rc_target_bitrate = (mBitrate + 500) / 1000;
     mCodecConfiguration->rc_end_usage = mBitrateControlMode;
+    // Disable frame drop - not allowed in MediaCodec now.
+    mCodecConfiguration->rc_dropframe_thresh = 0;
+    if (mBitrateControlMode == VPX_CBR) {
+        // Disable spatial resizing.
+        mCodecConfiguration->rc_resize_allowed = 0;
+        // Single-pass mode.
+        mCodecConfiguration->g_pass = VPX_RC_ONE_PASS;
+        // Maximum amount of bits that can be subtracted from the target
+        // bitrate - expressed as percentage of the target bitrate.
+        mCodecConfiguration->rc_undershoot_pct = 100;
+        // Maximum amount of bits that can be added to the target
+        // bitrate - expressed as percentage of the target bitrate.
+        mCodecConfiguration->rc_overshoot_pct = 15;
+        // Initial value of the buffer level in ms.
+        mCodecConfiguration->rc_buf_initial_sz = 500;
+        // Amount of data that the encoder should try to maintain in ms.
+        mCodecConfiguration->rc_buf_optimal_sz = 600;
+        // The amount of data that may be buffered by the decoding
+        // application in ms.
+        mCodecConfiguration->rc_buf_sz = 1000;
+        // Enable error resilience - needed for packet loss.
+        mCodecConfiguration->g_error_resilient = 1;
+        // Disable lagged encoding.
+        mCodecConfiguration->g_lag_in_frames = 0;
+        // Maximum key frame interval - for CBR boost to 3000
+        mCodecConfiguration->kf_max_dist = 3000;
+        // Encoder determines optimal key frame placement automatically.
+        mCodecConfiguration->kf_mode = VPX_KF_AUTO;
+    }
+
+    // Frames temporal pattern - for now WebRTC like pattern is only supported.
+    switch (mTemporalLayers) {
+        case 0:
+        {
+            mTemporalPatternLength = 0;
+            break;
+        }
+        case 1:
+        {
+            mCodecConfiguration->ts_number_layers = 1;
+            mCodecConfiguration->ts_rate_decimator[0] = 1;
+            mCodecConfiguration->ts_periodicity = 1;
+            mCodecConfiguration->ts_layer_id[0] = 0;
+            mTemporalPattern[0] = kTemporalUpdateLastRefAll;
+            mTemporalPatternLength = 1;
+            break;
+        }
+        case 2:
+        {
+            mCodecConfiguration->ts_number_layers = 2;
+            mCodecConfiguration->ts_rate_decimator[0] = 2;
+            mCodecConfiguration->ts_rate_decimator[1] = 1;
+            mCodecConfiguration->ts_periodicity = 2;
+            mCodecConfiguration->ts_layer_id[0] = 0;
+            mCodecConfiguration->ts_layer_id[1] = 1;
+            mTemporalPattern[0] = kTemporalUpdateLastAndGoldenRefAltRef;
+            mTemporalPattern[1] = kTemporalUpdateGoldenWithoutDependencyRefAltRef;
+            mTemporalPattern[2] = kTemporalUpdateLastRefAltRef;
+            mTemporalPattern[3] = kTemporalUpdateGoldenRefAltRef;
+            mTemporalPattern[4] = kTemporalUpdateLastRefAltRef;
+            mTemporalPattern[5] = kTemporalUpdateGoldenRefAltRef;
+            mTemporalPattern[6] = kTemporalUpdateLastRefAltRef;
+            mTemporalPattern[7] = kTemporalUpdateNone;
+            mTemporalPatternLength = 8;
+            break;
+        }
+        case 3:
+        {
+            mCodecConfiguration->ts_number_layers = 3;
+            mCodecConfiguration->ts_rate_decimator[0] = 4;
+            mCodecConfiguration->ts_rate_decimator[1] = 2;
+            mCodecConfiguration->ts_rate_decimator[2] = 1;
+            mCodecConfiguration->ts_periodicity = 4;
+            mCodecConfiguration->ts_layer_id[0] = 0;
+            mCodecConfiguration->ts_layer_id[1] = 2;
+            mCodecConfiguration->ts_layer_id[2] = 1;
+            mCodecConfiguration->ts_layer_id[3] = 2;
+            mTemporalPattern[0] = kTemporalUpdateLastAndGoldenRefAltRef;
+            mTemporalPattern[1] = kTemporalUpdateNoneNoRefGoldenRefAltRef;
+            mTemporalPattern[2] = kTemporalUpdateGoldenWithoutDependencyRefAltRef;
+            mTemporalPattern[3] = kTemporalUpdateNone;
+            mTemporalPattern[4] = kTemporalUpdateLastRefAltRef;
+            mTemporalPattern[5] = kTemporalUpdateNone;
+            mTemporalPattern[6] = kTemporalUpdateGoldenRefAltRef;
+            mTemporalPattern[7] = kTemporalUpdateNone;
+            mTemporalPatternLength = 8;
+            break;
+        }
+        default:
+        {
+            ALOGE("Wrong number of temporal layers %u", mTemporalLayers);
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    // Set bitrate values for each layer
+    for (size_t i = 0; i < mCodecConfiguration->ts_number_layers; i++) {
+        mCodecConfiguration->ts_target_bitrate[i] =
+            mCodecConfiguration->rc_target_bitrate *
+            mTemporalLayerBitrateRatio[i] / 100;
+    }
+    if (mKeyFrameInterval > 0) {
+        mCodecConfiguration->kf_max_dist = mKeyFrameInterval;
+        mCodecConfiguration->kf_min_dist = mKeyFrameInterval;
+        mCodecConfiguration->kf_mode = VPX_KF_AUTO;
+    }
+    if (mMinQuantizer > 0) {
+        mCodecConfiguration->rc_min_quantizer = mMinQuantizer;
+    }
+    if (mMaxQuantizer > 0) {
+        mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
+    }
 
     codec_return = vpx_codec_enc_init(mCodecContext,
                                       mCodecInterface,
@@ -298,6 +420,33 @@
         return UNKNOWN_ERROR;
     }
 
+    // Extra CBR settings
+    if (mBitrateControlMode == VPX_CBR) {
+        codec_return = vpx_codec_control(mCodecContext,
+                                         VP8E_SET_STATIC_THRESHOLD,
+                                         1);
+        if (codec_return == VPX_CODEC_OK) {
+            uint32_t rc_max_intra_target =
+                mCodecConfiguration->rc_buf_optimal_sz * (mFramerate >> 17) / 10;
+            // Don't go below 3 times per frame bandwidth.
+            if (rc_max_intra_target < 300) {
+                rc_max_intra_target = 300;
+            }
+            codec_return = vpx_codec_control(mCodecContext,
+                                             VP8E_SET_MAX_INTRA_BITRATE_PCT,
+                                             rc_max_intra_target);
+        }
+        if (codec_return == VPX_CODEC_OK) {
+            codec_return = vpx_codec_control(mCodecContext,
+                                             VP8E_SET_CPUUSED,
+                                             -8);
+        }
+        if (codec_return != VPX_CODEC_OK) {
+            ALOGE("Error setting cbr parameters for vpx encoder.");
+            return UNKNOWN_ERROR;
+        }
+    }
+
     if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || mInputDataIsMeta) {
         if (mConversionBuffer == NULL) {
             mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
@@ -361,9 +510,7 @@
                 }
 
                 formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                // Converting from microseconds
-                // Also converting to Q16 format
-                formatParams->xFramerate = (1000000/mFrameDurationUs) << 16;
+                formatParams->xFramerate = mFramerate;
                 return OMX_ErrorNone;
             } else if (formatParams->nPortIndex == kOutputPortIndex) {
                 formatParams->eCompressionFormat = OMX_VIDEO_CodingVP8;
@@ -411,6 +558,24 @@
                 return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamVideoAndroidVp8Encoder: {
+            OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
+                (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
+
+                if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
+                    return OMX_ErrorUnsupportedIndex;
+                }
+
+                vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+                vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
+                vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
+                vp8AndroidParams->nMinQuantizer = mMinQuantizer;
+                vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
+                memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
+                       mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+                return OMX_ErrorNone;
+        }
+
         case OMX_IndexParamVideoProfileLevelQuerySupported: {
             OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileAndLevel =
                 (OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param;
@@ -497,11 +662,15 @@
             return internalSetVp8Params(
                 (const OMX_VIDEO_PARAM_VP8TYPE *)param);
 
+        case OMX_IndexParamVideoAndroidVp8Encoder:
+            return internalSetAndroidVp8Params(
+                (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
+
         case OMX_IndexParamVideoProfileLevelCurrent:
             return internalSetProfileLevel(
                 (const OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param);
 
-        case OMX_IndexVendorStartUnused:
+        case kStoreMetaDataExtensionIndex:
         {
             // storeMetaDataInBuffers
             const StoreMetaDataInBuffersParams *storeParam =
@@ -610,6 +779,50 @@
     return OMX_ErrorNone;
 }
 
+OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVp8Params(
+        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams) {
+    if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+    if (vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
+        vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+        return OMX_ErrorBadParameter;
+    }
+    if (vp8AndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+        return OMX_ErrorBadParameter;
+    }
+    if (vp8AndroidParams->nMinQuantizer > vp8AndroidParams->nMaxQuantizer) {
+        return OMX_ErrorBadParameter;
+    }
+
+    mTemporalPatternType = vp8AndroidParams->eTemporalPattern;
+    if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+        mTemporalLayers = vp8AndroidParams->nTemporalLayerCount;
+    } else if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
+        mTemporalLayers = 0;
+    }
+    // Check the bitrate distribution between layers is in increasing order
+    if (mTemporalLayers > 1) {
+        for (size_t i = 0; i < mTemporalLayers - 1; i++) {
+            if (vp8AndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
+                    vp8AndroidParams->nTemporalLayerBitrateRatio[i]) {
+                ALOGE("Wrong bitrate ratio - should be in increasing order.");
+                return OMX_ErrorBadParameter;
+            }
+        }
+    }
+    mKeyFrameInterval = vp8AndroidParams->nKeyFrameInterval;
+    mMinQuantizer = vp8AndroidParams->nMinQuantizer;
+    mMaxQuantizer = vp8AndroidParams->nMaxQuantizer;
+    memcpy(mTemporalLayerBitrateRatio, vp8AndroidParams->nTemporalLayerBitrateRatio,
+            sizeof(mTemporalLayerBitrateRatio));
+    ALOGD("VP8: internalSetAndroidVp8Params. BRMode: %u. TS: %zu. KF: %u."
+          " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
+          (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+          mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
+          mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
+    return OMX_ErrorNone;
+}
 
 OMX_ERRORTYPE SoftVPXEncoder::internalSetFormatParams(
         const OMX_VIDEO_PARAM_PORTFORMATTYPE* format) {
@@ -660,9 +873,7 @@
         mHeight = port->format.video.nFrameHeight;
 
         // xFramerate comes in Q16 format, in frames per second unit
-        const uint32_t framerate = port->format.video.xFramerate >> 16;
-        // frame duration is in microseconds
-        mFrameDurationUs = (1000000/framerate);
+        mFramerate = port->format.video.xFramerate;
 
         if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar ||
             port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
@@ -675,7 +886,7 @@
         OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
         def->format.video.nFrameWidth = mWidth;
         def->format.video.nFrameHeight = mHeight;
-        def->format.video.xFramerate = port->format.video.xFramerate;
+        def->format.video.xFramerate = mFramerate;
         def->format.video.eColorFormat = mColorFormat;
         def = &editPortInfo(kOutputPortIndex)->mDef;
         def->format.video.nFrameWidth = mWidth;
@@ -684,6 +895,13 @@
         return OMX_ErrorNone;
     } else if (port->nPortIndex == kOutputPortIndex) {
         mBitrate = port->format.video.nBitrate;
+        mWidth = port->format.video.nFrameWidth;
+        mHeight = port->format.video.nFrameHeight;
+
+        OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
+        def->format.video.nFrameWidth = mWidth;
+        def->format.video.nFrameHeight = mHeight;
+        def->format.video.nBitrate = mBitrate;
         return OMX_ErrorNone;
     } else {
         return OMX_ErrorBadPortIndex;
@@ -710,6 +928,74 @@
     return OMX_ErrorNone;
 }
 
+vpx_enc_frame_flags_t SoftVPXEncoder::getEncodeFlags() {
+    vpx_enc_frame_flags_t flags = 0;
+    int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
+    mTemporalPatternIdx++;
+    switch (mTemporalPattern[patternIdx]) {
+        case kTemporalUpdateLast:
+            flags |= VP8_EFLAG_NO_UPD_GF;
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_REF_GF;
+            flags |= VP8_EFLAG_NO_REF_ARF;
+            break;
+        case kTemporalUpdateGoldenWithoutDependency:
+            flags |= VP8_EFLAG_NO_REF_GF;
+            // Deliberately no break here.
+        case kTemporalUpdateGolden:
+            flags |= VP8_EFLAG_NO_REF_ARF;
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_UPD_LAST;
+            break;
+        case kTemporalUpdateAltrefWithoutDependency:
+            flags |= VP8_EFLAG_NO_REF_ARF;
+            flags |= VP8_EFLAG_NO_REF_GF;
+            // Deliberately no break here.
+        case kTemporalUpdateAltref:
+            flags |= VP8_EFLAG_NO_UPD_GF;
+            flags |= VP8_EFLAG_NO_UPD_LAST;
+            break;
+        case kTemporalUpdateNoneNoRefAltref:
+            flags |= VP8_EFLAG_NO_REF_ARF;
+            // Deliberately no break here.
+        case kTemporalUpdateNone:
+            flags |= VP8_EFLAG_NO_UPD_GF;
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_UPD_LAST;
+            flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+            break;
+        case kTemporalUpdateNoneNoRefGoldenRefAltRef:
+            flags |= VP8_EFLAG_NO_REF_GF;
+            flags |= VP8_EFLAG_NO_UPD_GF;
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_UPD_LAST;
+            flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+            break;
+        case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
+            flags |= VP8_EFLAG_NO_REF_GF;
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_UPD_LAST;
+            break;
+        case kTemporalUpdateLastRefAltRef:
+            flags |= VP8_EFLAG_NO_UPD_GF;
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_REF_GF;
+            break;
+        case kTemporalUpdateGoldenRefAltRef:
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_UPD_LAST;
+            break;
+        case kTemporalUpdateLastAndGoldenRefAltRef:
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_REF_GF;
+            break;
+        case kTemporalUpdateLastRefAll:
+            flags |= VP8_EFLAG_NO_UPD_ARF;
+            flags |= VP8_EFLAG_NO_UPD_GF;
+            break;
+    }
+    return flags;
+}
 
 void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) {
     // Initialize encoder if not already
@@ -794,6 +1080,9 @@
                      kInputBufferAlignment, source);
 
         vpx_enc_frame_flags_t flags = 0;
+        if (mTemporalPatternLength > 0) {
+            flags = getEncodeFlags();
+        }
         if (mKeyFrameRequested) {
             flags |= VPX_EFLAG_FORCE_KF;
             mKeyFrameRequested = false;
@@ -814,11 +1103,18 @@
             mBitrateUpdated = false;
         }
 
+        uint32_t frameDuration;
+        if (inputBufferHeader->nTimeStamp > mLastTimestamp) {
+            frameDuration = (uint32_t)(inputBufferHeader->nTimeStamp - mLastTimestamp);
+        } else {
+            frameDuration = (uint32_t)(((uint64_t)1000000 << 16) / mFramerate);
+        }
+        mLastTimestamp = inputBufferHeader->nTimeStamp;
         codec_return = vpx_codec_encode(
                 mCodecContext,
                 &raw_frame,
                 inputBufferHeader->nTimeStamp,  // in timebase units
-                mFrameDurationUs,  // frame duration in timebase units
+                frameDuration,  // frame duration in timebase units
                 flags,  // frame flags
                 VPX_DL_REALTIME);  // encoding deadline
         if (codec_return != VPX_CODEC_OK) {
@@ -860,10 +1156,9 @@
 OMX_ERRORTYPE SoftVPXEncoder::getExtensionIndex(
         const char *name, OMX_INDEXTYPE *index) {
     if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
-        *index = OMX_IndexVendorStartUnused;
+        *(int32_t*)index = kStoreMetaDataExtensionIndex;
         return OMX_ErrorNone;
     }
-
     return SimpleSoftOMXComponent::getExtensionIndex(name, index);
 }
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 076830f..c5a83d1 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -91,6 +91,47 @@
             const char *name, OMX_INDEXTYPE *index);
 
 private:
+    enum {
+        kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1,
+    };
+
+    enum TemporalReferences {
+        // For 1 layer case: reference all (last, golden, and alt ref), but only
+        // update last.
+        kTemporalUpdateLastRefAll = 12,
+        // First base layer frame for 3 temporal layers, which updates last and
+        // golden with alt ref dependency.
+        kTemporalUpdateLastAndGoldenRefAltRef = 11,
+        // First enhancement layer with alt ref dependency.
+        kTemporalUpdateGoldenRefAltRef = 10,
+        // First enhancement layer with alt ref dependency.
+        kTemporalUpdateGoldenWithoutDependencyRefAltRef = 9,
+        // Base layer with alt ref dependency.
+        kTemporalUpdateLastRefAltRef = 8,
+        // Highest enhancement layer without dependency on golden with alt ref
+        // dependency.
+        kTemporalUpdateNoneNoRefGoldenRefAltRef = 7,
+        // Second layer and last frame in cycle, for 2 layers.
+        kTemporalUpdateNoneNoRefAltref = 6,
+        // Highest enhancement layer.
+        kTemporalUpdateNone = 5,
+        // Second enhancement layer.
+        kTemporalUpdateAltref = 4,
+        // Second enhancement layer without dependency on previous frames in
+        // the second enhancement layer.
+        kTemporalUpdateAltrefWithoutDependency = 3,
+        // First enhancement layer.
+        kTemporalUpdateGolden = 2,
+        // First enhancement layer without dependency on previous frames in
+        // the first enhancement layer.
+        kTemporalUpdateGoldenWithoutDependency = 1,
+        // Base layer.
+        kTemporalUpdateLast = 0,
+    };
+    enum {
+        kMaxTemporalPattern = 8
+    };
+
     // number of buffers allocated per port
     static const uint32_t kNumBuffers = 4;
 
@@ -130,16 +171,15 @@
     // Target bitrate set for the encoder, in bits per second.
     uint32_t mBitrate;
 
+    // Target framerate set for the encoder.
+    uint32_t mFramerate;
+
     // If a request for a change it bitrate has been received.
     bool mBitrateUpdated;
 
     // Bitrate control mode, either constant or variable
     vpx_rc_mode mBitrateControlMode;
 
-    // Frame duration is the reciprocal of framerate, denoted
-    // in microseconds
-    uint64_t mFrameDurationUs;
-
     // vp8 specific configuration parameter
     // that enables token partitioning of
     // the stream into substreams
@@ -160,6 +200,36 @@
     // something else.
     OMX_VIDEO_VP8LEVELTYPE mLevel;
 
+    // Key frame interval in frames
+    uint32_t mKeyFrameInterval;
+
+    // Minimum (best quality) quantizer
+    uint32_t mMinQuantizer;
+
+    // Maximum (worst quality) quantizer
+    uint32_t mMaxQuantizer;
+
+    // Number of coding temporal layers to be used.
+    size_t mTemporalLayers;
+
+    // Temporal layer bitrate ratio in percentage
+    uint32_t mTemporalLayerBitrateRatio[OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS];
+
+    // Temporal pattern type
+    OMX_VIDEO_ANDROID_VPXTEMPORALLAYERPATTERNTYPE mTemporalPatternType;
+
+    // Temporal pattern length
+    size_t mTemporalPatternLength;
+
+    // Temporal pattern current index
+    size_t mTemporalPatternIdx;
+
+    // Frame type temporal pattern
+    TemporalReferences mTemporalPattern[kMaxTemporalPattern];
+
+    // Last input buffer timestamp
+    OMX_TICKS mLastTimestamp;
+
     // Conversion buffer is needed to convert semi
     // planar yuv420 to planar format
     // It is only allocated if input format is
@@ -185,6 +255,9 @@
     // dtor.
     status_t releaseEncoder();
 
+    // Get current encode flags
+    vpx_enc_frame_flags_t getEncodeFlags();
+
     // Handles port changes with respect to color formats
     OMX_ERRORTYPE internalSetFormatParams(
         const OMX_VIDEO_PARAM_PORTFORMATTYPE* format);
@@ -206,6 +279,10 @@
     OMX_ERRORTYPE internalSetVp8Params(
         const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
 
+    // Handles Android vp8 specific parameters.
+    OMX_ERRORTYPE internalSetAndroidVp8Params(
+        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams);
+
     // Updates encoder profile
     OMX_ERRORTYPE internalSetProfileLevel(
         const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel);
diff --git a/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c b/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c
index 2bb4c4d..524a3f0 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c
@@ -42,6 +42,8 @@
 #include "h264bsd_decoder.h"
 #include "h264bsd_util.h"
 
+#define UNUSED(x) (void)(x)
+
 /*------------------------------------------------------------------------------
        Version Information
 ------------------------------------------------------------------------------*/
@@ -73,6 +75,7 @@
 #endif
 
 void H264SwDecTrace(char *string) {
+    UNUSED(string);
 }
 
 void* H264SwDecMalloc(u32 size) {
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c
index c948776..b409a06 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c
@@ -42,6 +42,8 @@
 #include "armVC.h"
 #endif /* H264DEC_OMXDL */
 
+#define UNUSED(x) (void)(x)
+
 /*------------------------------------------------------------------------------
     2. External compiler flags
 --------------------------------------------------------------------------------
@@ -2136,7 +2138,8 @@
   i32 center,
   i32 right)
 {
-
+    UNUSED(left);
+    UNUSED(right);
     ASSERT(ref);
     ASSERT(fill);
 
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c
index a7c6f64..23401c6 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c
@@ -47,6 +47,8 @@
 #include "h264bsd_nal_unit.h"
 #include "h264bsd_dpb.h"
 
+#define UNUSED(x) (void)(x)
+
 /*------------------------------------------------------------------------------
     2. External compiler flags
 --------------------------------------------------------------------------------
@@ -1407,6 +1409,7 @@
     u32 tmp, value, i;
     i32 ivalue;
     strmData_t tmpStrmData[1];
+    UNUSED(nalUnitType);
 
 /* Code */
 
diff --git a/media/libstagefright/codecs/opus/Android.mk b/media/libstagefright/codecs/opus/Android.mk
new file mode 100644
index 0000000..365b179
--- /dev/null
+++ b/media/libstagefright/codecs/opus/Android.mk
@@ -0,0 +1,4 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
\ No newline at end of file
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
new file mode 100644
index 0000000..2379c5f
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -0,0 +1,19 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        SoftOpus.cpp
+
+LOCAL_C_INCLUDES := \
+        external/libopus/include \
+        frameworks/av/media/libstagefright/include \
+        frameworks/native/include/media/openmax \
+
+LOCAL_SHARED_LIBRARIES := \
+        libopus libstagefright libstagefright_omx \
+        libstagefright_foundation libutils liblog
+
+LOCAL_MODULE := libstagefright_soft_opusdec
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
\ No newline at end of file
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
new file mode 100644
index 0000000..b8084ae
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftOpus"
+#include <utils/Log.h>
+
+#include "SoftOpus.h"
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+extern "C" {
+    #include <opus.h>
+    #include <opus_multistream.h>
+}
+
+namespace android {
+
+static const int kRate = 48000;
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftOpus::SoftOpus(
+        const char *name,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mInputBufferCount(0),
+      mDecoder(NULL),
+      mHeader(NULL),
+      mCodecDelay(0),
+      mSeekPreRoll(0),
+      mAnchorTimeUs(0),
+      mNumFramesOutput(0),
+      mOutputPortSettingsChange(NONE) {
+    initPorts();
+    CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+SoftOpus::~SoftOpus() {
+    if (mDecoder != NULL) {
+        opus_multistream_decoder_destroy(mDecoder);
+        mDecoder = NULL;
+    }
+    if (mHeader != NULL) {
+        delete mHeader;
+        mHeader = NULL;
+    }
+}
+
+void SoftOpus::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = 960 * 6;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.audio.cMIMEType =
+        const_cast<char *>(MEDIA_MIMETYPE_AUDIO_OPUS);
+
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding =
+        (OMX_AUDIO_CODINGTYPE)OMX_AUDIO_CodingAndroidOPUS;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = kMaxNumSamplesPerBuffer * sizeof(int16_t);
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+    addPort(def);
+}
+
+status_t SoftOpus::initDecoder() {
+    return OK;
+}
+
+OMX_ERRORTYPE SoftOpus::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch ((int)index) {
+        case OMX_IndexParamAudioAndroidOpus:
+        {
+            OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
+                (OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
+
+            if (opusParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            opusParams->nAudioBandWidth = 0;
+            opusParams->nSampleRate = kRate;
+            opusParams->nBitRate = 0;
+
+            if (!isConfigured()) {
+                opusParams->nChannels = 1;
+            } else {
+                opusParams->nChannels = mHeader->channels;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eEndian = OMX_EndianBig;
+            pcmParams->bInterleaved = OMX_TRUE;
+            pcmParams->nBitPerSample = 16;
+            pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+            pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+            pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+            pcmParams->nSamplingRate = kRate;
+
+            if (!isConfigured()) {
+                pcmParams->nChannels = 1;
+            } else {
+                pcmParams->nChannels = mHeader->channels;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftOpus::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch ((int)index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        "audio_decoder.opus",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAndroidOpus:
+        {
+            const OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
+                (const OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
+
+            if (opusParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+bool SoftOpus::isConfigured() const {
+    return mInputBufferCount >= 1;
+}
+
+static uint16_t ReadLE16(const uint8_t *data, size_t data_size,
+                         uint32_t read_offset) {
+    if (read_offset + 1 >= data_size)  // need both bytes [read_offset, read_offset + 1] in range
+        return 0;
+    uint16_t val;
+    val = data[read_offset];
+    val |= data[read_offset + 1] << 8;
+    return val;
+}
+
+// Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies
+// mappings for up to 8 channels. This information is part of the Vorbis I
+// Specification:
+// http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
+static const int kMaxChannels = 8;
+
+// Maximum packet size used in Xiph's opusdec.
+static const int kMaxOpusOutputPacketSizeSamples = 960 * 6;
+
+// Default audio output channel layout. Used to initialize |stream_map| in
+// OpusHeader, and passed to opus_multistream_decoder_create() when the header
+// does not contain mapping information. The values are valid only for mono and
+// stereo output: Opus streams with more than 2 channels require a stream map.
+static const int kMaxChannelsWithDefaultLayout = 2;
+static const uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = { 0, 1 };
+
+// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
+static bool ParseOpusHeader(const uint8_t *data, size_t data_size,
+                            OpusHeader* header) {
+    // Size of the Opus header excluding optional mapping information.
+    const size_t kOpusHeaderSize = 19;
+
+    // Offset to the channel count byte in the Opus header.
+    const size_t kOpusHeaderChannelsOffset = 9;
+
+    // Offset to the pre-skip value in the Opus header.
+    const size_t kOpusHeaderSkipSamplesOffset = 10;
+
+    // Offset to the gain value in the Opus header.
+    const size_t kOpusHeaderGainOffset = 16;
+
+    // Offset to the channel mapping byte in the Opus header.
+    const size_t kOpusHeaderChannelMappingOffset = 18;
+
+    // Opus Header contains a stream map. The mapping values are in the header
+    // beyond the always present |kOpusHeaderSize| bytes of data. The mapping
+    // data contains stream count, coupling information, and per channel mapping
+    // values:
+    //   - Byte 0: Number of streams.
+    //   - Byte 1: Number coupled.
+    //   - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping
+    //             values.
+    const size_t kOpusHeaderNumStreamsOffset = kOpusHeaderSize;
+    const size_t kOpusHeaderNumCoupledOffset = kOpusHeaderNumStreamsOffset + 1;
+    const size_t kOpusHeaderStreamMapOffset = kOpusHeaderNumStreamsOffset + 2;
+
+    if (data_size < kOpusHeaderSize) {
+        ALOGV("Header size is too small.");
+        return false;
+    }
+    header->channels = *(data + kOpusHeaderChannelsOffset);
+
+    if (header->channels <= 0 || header->channels > kMaxChannels) {
+        ALOGV("Invalid Header, wrong channel count: %d", header->channels);
+        return false;
+    }
+    header->skip_samples = ReadLE16(data, data_size,
+                                        kOpusHeaderSkipSamplesOffset);
+    header->gain_db = static_cast<int16_t>(
+                              ReadLE16(data, data_size,
+                                       kOpusHeaderGainOffset));
+    header->channel_mapping = *(data + kOpusHeaderChannelMappingOffset);
+    if (!header->channel_mapping) {
+        if (header->channels > kMaxChannelsWithDefaultLayout) {
+            ALOGV("Invalid Header, missing stream map.");
+            return false;
+        }
+        header->num_streams = 1;
+        header->num_coupled = header->channels > 1;
+        header->stream_map[0] = 0;
+        header->stream_map[1] = 1;
+        return true;
+    }
+    if (data_size < kOpusHeaderStreamMapOffset + header->channels) {
+        ALOGV("Invalid stream map; insufficient data for current channel "
+              "count: %d", header->channels);
+        return false;
+    }
+    header->num_streams = *(data + kOpusHeaderNumStreamsOffset);
+    header->num_coupled = *(data + kOpusHeaderNumCoupledOffset);
+    if (header->num_streams + header->num_coupled != header->channels) {
+        ALOGV("Inconsistent channel mapping.");
+        return false;
+    }
+    for (int i = 0; i < header->channels; ++i)
+      header->stream_map[i] = *(data + kOpusHeaderStreamMapOffset + i);
+    return true;
+}
+
+// Convert nanoseconds to number of samples.
+static uint64_t ns_to_samples(uint64_t ns, int kRate) {
+    return static_cast<double>(ns) * kRate / 1000000000;
+}
+
+void SoftOpus::onQueueFilled(OMX_U32 portIndex) {
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+
+    if (mOutputPortSettingsChange != NONE) {
+        return;
+    }
+
+    if (portIndex == 0 && mInputBufferCount < 3) {
+        BufferInfo *info = *inQueue.begin();
+        OMX_BUFFERHEADERTYPE *header = info->mHeader;
+
+        const uint8_t *data = header->pBuffer + header->nOffset;
+        size_t size = header->nFilledLen;
+
+        if (mInputBufferCount == 0) {
+            CHECK(mHeader == NULL);
+            mHeader = new OpusHeader();
+            memset(mHeader, 0, sizeof(*mHeader));
+            if (!ParseOpusHeader(data, size, mHeader)) {
+                ALOGV("Parsing Opus Header failed.");
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
+
+            uint8_t channel_mapping[kMaxChannels] = {0};
+            memcpy(&channel_mapping,
+                   kDefaultOpusChannelLayout,
+                   kMaxChannelsWithDefaultLayout);
+
+            int status = OPUS_INVALID_STATE;
+            mDecoder = opus_multistream_decoder_create(kRate,
+                                                       mHeader->channels,
+                                                       mHeader->num_streams,
+                                                       mHeader->num_coupled,
+                                                       channel_mapping,
+                                                       &status);
+            if (!mDecoder || status != OPUS_OK) {
+                ALOGV("opus_multistream_decoder_create failed status=%s",
+                      opus_strerror(status));
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
+            status =
+                opus_multistream_decoder_ctl(mDecoder,
+                                             OPUS_SET_GAIN(mHeader->gain_db));
+            if (status != OPUS_OK) {
+                ALOGV("Failed to set OPUS header gain; status=%s",
+                      opus_strerror(status));
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
+        } else if (mInputBufferCount == 1) {
+            mCodecDelay = ns_to_samples(
+                              *(reinterpret_cast<int64_t*>(header->pBuffer +
+                                                           header->nOffset)),
+                              kRate);
+            mSamplesToDiscard = mCodecDelay;
+        } else {
+            mSeekPreRoll = ns_to_samples(
+                               *(reinterpret_cast<int64_t*>(header->pBuffer +
+                                                            header->nOffset)),
+                               kRate);
+            notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+            mOutputPortSettingsChange = AWAITING_DISABLED;
+        }
+
+        inQueue.erase(inQueue.begin());
+        info->mOwnedByUs = false;
+        notifyEmptyBufferDone(header);
+        ++mInputBufferCount;
+        return;
+    }
+
+    while (!inQueue.empty() && !outQueue.empty()) {
+        BufferInfo *inInfo = *inQueue.begin();
+        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+            inQueue.erase(inQueue.begin());
+            inInfo->mOwnedByUs = false;
+            notifyEmptyBufferDone(inHeader);
+
+            outHeader->nFilledLen = 0;
+            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+
+            outQueue.erase(outQueue.begin());
+            outInfo->mOwnedByUs = false;
+            notifyFillBufferDone(outHeader);
+            return;
+        }
+
+        if (inHeader->nOffset == 0) {
+            mAnchorTimeUs = inHeader->nTimeStamp;
+            mNumFramesOutput = 0;
+        }
+
+        // When seeking to zero, |mCodecDelay| samples has to be discarded
+        // instead of |mSeekPreRoll| samples (as we would when seeking to any
+        // other timestamp).
+        if (inHeader->nTimeStamp == 0) {
+            mSamplesToDiscard = mCodecDelay;
+        }
+
+        const uint8_t *data = inHeader->pBuffer + inHeader->nOffset;
+        const uint32_t size = inHeader->nFilledLen;
+
+        int numFrames = opus_multistream_decode(mDecoder,
+                                                data,
+                                                size,
+                                                (int16_t *)outHeader->pBuffer,
+                                                kMaxOpusOutputPacketSizeSamples,
+                                                0);
+        if (numFrames < 0) {
+            ALOGE("opus_multistream_decode returned %d", numFrames);
+            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            return;
+        }
+
+        outHeader->nOffset = 0;
+        if (mSamplesToDiscard > 0) {
+            if (mSamplesToDiscard > numFrames) {
+                mSamplesToDiscard -= numFrames;
+                numFrames = 0;
+            } else {
+                numFrames -= mSamplesToDiscard;
+                outHeader->nOffset = mSamplesToDiscard * sizeof(int16_t) *
+                                     mHeader->channels;
+                mSamplesToDiscard = 0;
+            }
+        }
+
+        outHeader->nFilledLen = numFrames * sizeof(int16_t) * mHeader->channels;
+        outHeader->nFlags = 0;
+
+        outHeader->nTimeStamp = mAnchorTimeUs +
+                                (mNumFramesOutput * 1000000ll) /
+                                kRate;
+
+        mNumFramesOutput += numFrames;
+
+        inInfo->mOwnedByUs = false;
+        inQueue.erase(inQueue.begin());
+        inInfo = NULL;
+        notifyEmptyBufferDone(inHeader);
+        inHeader = NULL;
+
+        outInfo->mOwnedByUs = false;
+        outQueue.erase(outQueue.begin());
+        outInfo = NULL;
+        notifyFillBufferDone(outHeader);
+        outHeader = NULL;
+
+        ++mInputBufferCount;
+    }
+}
+
+void SoftOpus::onPortFlushCompleted(OMX_U32 portIndex) {
+    if (portIndex == 0 && mDecoder != NULL) {
+        // Make sure that the next buffer output does not still
+        // depend on fragments from the last one decoded.
+        mNumFramesOutput = 0;
+        opus_multistream_decoder_ctl(mDecoder, OPUS_RESET_STATE);
+        mAnchorTimeUs = 0;
+        mSamplesToDiscard = mSeekPreRoll;
+    }
+}
+
+void SoftOpus::onReset() {
+    mInputBufferCount = 0;
+    mNumFramesOutput = 0;
+    if (mDecoder != NULL) {
+        opus_multistream_decoder_destroy(mDecoder);
+        mDecoder = NULL;
+    }
+    if (mHeader != NULL) {
+        delete mHeader;
+        mHeader = NULL;
+    }
+
+    mOutputPortSettingsChange = NONE;
+}
+
+void SoftOpus::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
+    if (portIndex != 1) {
+        return;
+    }
+
+    switch (mOutputPortSettingsChange) {
+        case NONE:
+            break;
+
+        case AWAITING_DISABLED:
+        {
+            CHECK(!enabled);
+            mOutputPortSettingsChange = AWAITING_ENABLED;
+            break;
+        }
+
+        default:
+        {
+            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
+            CHECK(enabled);
+            mOutputPortSettingsChange = NONE;
+            break;
+        }
+    }
+}
+
+}  // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+        const char *name, const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+    return new android::SoftOpus(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.h b/media/libstagefright/codecs/opus/dec/SoftOpus.h
new file mode 100644
index 0000000..97f6561
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The Opus specification is part of IETF RFC 6716:
+ * http://tools.ietf.org/html/rfc6716
+ */
+
+#ifndef SOFT_OPUS_H_
+
+#define SOFT_OPUS_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+struct OpusMSDecoder;
+
+namespace android {
+
+struct OpusHeader {
+  int channels;
+  int skip_samples;
+  int channel_mapping;
+  int num_streams;
+  int num_coupled;
+  int16_t gain_db;
+  uint8_t stream_map[8];
+};
+
+struct SoftOpus : public SimpleSoftOMXComponent {
+    SoftOpus(const char *name,
+             const OMX_CALLBACKTYPE *callbacks,
+             OMX_PTR appData,
+             OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual ~SoftOpus();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+    virtual void onPortFlushCompleted(OMX_U32 portIndex);
+    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
+
+private:
+    enum {
+        kNumBuffers = 4,
+        kMaxNumSamplesPerBuffer = 960 * 6
+    };
+
+    size_t mInputBufferCount;
+
+    OpusMSDecoder *mDecoder;
+    OpusHeader *mHeader;
+
+    int64_t mCodecDelay;
+    int64_t mSeekPreRoll;
+    int64_t mSamplesToDiscard;
+    int64_t mAnchorTimeUs;
+    int64_t mNumFramesOutput;
+
+    enum {
+        NONE,
+        AWAITING_DISABLED,
+        AWAITING_ENABLED
+    } mOutputPortSettingsChange;
+
+    void initPorts();
+    status_t initDecoder();
+    bool isConfigured() const;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftOpus);
+};
+
+}  // namespace android
+
+#endif  // SOFT_OPUS_H_
diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk
index fe90a03..87080e7 100644
--- a/media/libstagefright/codecs/raw/Android.mk
+++ b/media/libstagefright/codecs/raw/Android.mk
@@ -8,6 +8,8 @@
         frameworks/av/media/libstagefright/include \
         frameworks/native/include/media/openmax
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
         libstagefright_omx libstagefright_foundation libutils liblog
 
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index 2232353..217a6d2 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -16,4 +16,6 @@
 LOCAL_MODULE := libstagefright_soft_vorbisdec
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
new file mode 100644
index 0000000..b1f93de
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.mp3.decoder" type="audio/mpeg" />
+        <MediaCodec name="OMX.google.amrnb.decoder" type="audio/3gpp" />
+        <MediaCodec name="OMX.google.amrwb.decoder" type="audio/amr-wb" />
+        <MediaCodec name="OMX.google.aac.decoder" type="audio/mp4a-latm" />
+        <MediaCodec name="OMX.google.g711.alaw.decoder" type="audio/g711-alaw" />
+        <MediaCodec name="OMX.google.g711.mlaw.decoder" type="audio/g711-mlaw" />
+        <MediaCodec name="OMX.google.vorbis.decoder" type="audio/vorbis" />
+        <MediaCodec name="OMX.google.opus.decoder" type="audio/opus" />
+    </Decoders>
+
+    <Encoders>
+        <MediaCodec name="OMX.google.aac.encoder" type="audio/mp4a-latm" />
+        <MediaCodec name="OMX.google.amrnb.encoder" type="audio/3gpp" />
+        <MediaCodec name="OMX.google.amrwb.encoder" type="audio/amr-wb" />
+        <MediaCodec name="OMX.google.flac.encoder" type="audio/flac" />
+    </Encoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_telephony.xml b/media/libstagefright/data/media_codecs_google_telephony.xml
new file mode 100644
index 0000000..28f5ffc
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_telephony.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.gsm.decoder" type="audio/gsm" />
+    </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
new file mode 100644
index 0000000..41e0efb
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
+        <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
+        <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
+        <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
+        <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+    </Decoders>
+
+    <Encoders>
+        <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
+        <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
+        <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
+        <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+    </Encoders>
+</Included>
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b6b21f1..f2d501e 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -20,6 +20,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include <utils/String8.h>
 #include "ADebug.h"
 #include "AString.h"
 
@@ -48,6 +49,13 @@
     setTo(s, size);
 }
 
+AString::AString(const String8 &from)
+    : mData(NULL),
+      mSize(0),
+      mAllocSize(1) {
+    setTo(from.string(), from.length());
+}
+
 AString::AString(const AString &from)
     : mData(NULL),
       mSize(0),
diff --git a/media/libstagefright/foundation/base64.cpp b/media/libstagefright/foundation/base64.cpp
index d5fb4e0..dcf5bef 100644
--- a/media/libstagefright/foundation/base64.cpp
+++ b/media/libstagefright/foundation/base64.cpp
@@ -33,6 +33,10 @@
 
         if (n >= 2 && s.c_str()[n - 2] == '=') {
             padding = 2;
+
+            if (n >= 3 && s.c_str()[n - 3] == '=') {
+                padding = 3;
+            }
         }
     }
 
@@ -71,7 +75,7 @@
         if (((i + 1) % 4) == 0) {
             out[j++] = (accum >> 16);
 
-            if (j < outLen) { out[j++] = (accum >> 8) & 0xff; } 
+            if (j < outLen) { out[j++] = (accum >> 8) & 0xff; }
             if (j < outLen) { out[j++] = accum & 0xff; }
 
             accum = 0;
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
new file mode 100644
index 0000000..7f3307d
--- /dev/null
+++ b/media/libstagefright/http/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH:= $(call my-dir)
+
+ifneq ($(TARGET_BUILD_PDK), true)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:=       \
+        HTTPHelper.cpp          \
+
+LOCAL_C_INCLUDES:= \
+	$(TOP)/frameworks/av/media/libstagefright \
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/frameworks/base/core/jni \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright liblog libutils libbinder libstagefright_foundation \
+        libandroid_runtime \
+        libmedia
+
+LOCAL_MODULE:= libstagefright_http_support
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_CFLAGS += -Werror
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/media/libstagefright/http/HTTPHelper.cpp b/media/libstagefright/http/HTTPHelper.cpp
new file mode 100644
index 0000000..77845e2
--- /dev/null
+++ b/media/libstagefright/http/HTTPHelper.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HTTPHelper"
+#include <utils/Log.h>
+
+#include "HTTPHelper.h"
+
+#include "android_runtime/AndroidRuntime.h"
+#include "android_util_Binder.h"
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <nativehelper/ScopedLocalRef.h>
+#include "jni.h"
+
+namespace android {
+
+sp<IMediaHTTPService> CreateHTTPServiceInCurrentJavaContext() {
+    if (AndroidRuntime::getJavaVM() == NULL) {
+        ALOGE("CreateHTTPServiceInCurrentJavaContext called outside "
+              "JAVA environment.");
+        return NULL;
+    }
+
+    JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+    ScopedLocalRef<jclass> clazz(
+            env, env->FindClass("android/media/MediaHTTPService"));
+    CHECK(clazz.get() != NULL);
+
+    jmethodID constructID = env->GetMethodID(clazz.get(), "<init>", "()V");
+    CHECK(constructID != NULL);
+
+    ScopedLocalRef<jobject> httpServiceObj(
+            env, env->NewObject(clazz.get(), constructID));
+
+    sp<IMediaHTTPService> httpService;
+    if (httpServiceObj.get() != NULL) {
+        jmethodID asBinderID =
+            env->GetMethodID(clazz.get(), "asBinder", "()Landroid/os/IBinder;");
+        CHECK(asBinderID != NULL);
+
+        ScopedLocalRef<jobject> httpServiceBinderObj(
+                env, env->CallObjectMethod(httpServiceObj.get(), asBinderID));
+        CHECK(httpServiceBinderObj.get() != NULL);
+
+        sp<IBinder> binder =
+            ibinderForJavaObject(env, httpServiceBinderObj.get());
+
+        httpService = interface_cast<IMediaHTTPService>(binder);
+    }
+
+    return httpService;
+}
+
+}  // namespace android
diff --git a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h b/media/libstagefright/http/HTTPHelper.h
old mode 100755
new mode 100644
similarity index 65%
rename from libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h
rename to media/libstagefright/http/HTTPHelper.h
index 13cac6d..8aef115
--- a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h
+++ b/media/libstagefright/http/HTTPHelper.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,13 +14,18 @@
  * limitations under the License.
  */
 
-#ifndef NXPSW_COMPILERSWITCHES_H
-#define NXPSW_COMPILERSWITCHES_H
+#ifndef HTTP_HELPER_H_
 
-/* ----- Main features ----- */
-#include "NXPSW_CompilerSwitches_MCS.h" /* Transcoder */
+#define HTTP_HELPER_H_
 
-/* ----- Add-ons ----- */
+#include <utils/RefBase.h>
 
-#endif /* NXPSW_COMPILERSWITCHES_H */
+namespace android {
 
+struct IMediaHTTPService;
+
+sp<IMediaHTTPService> CreateHTTPServiceInCurrentJavaContext();
+
+}  // namespace android
+
+#endif  // HTTP_HELPER_H_
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
new file mode 100644
index 0000000..2d29913
--- /dev/null
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaHTTP"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaHTTP.h>
+
+#include <binder/IServiceManager.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/Utils.h>
+
+#include <media/IMediaHTTPConnection.h>
+
+namespace android {
+
+MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
+    : mInitCheck(NO_INIT),
+      mHTTPConnection(conn),
+      mCachedSizeValid(false),
+      mCachedSize(0ll),
+      mDrmManagerClient(NULL) {
+    mInitCheck = OK;
+}
+
+MediaHTTP::~MediaHTTP() {
+    clearDRMState_l();
+}
+
+status_t MediaHTTP::connect(
+        const char *uri,
+        const KeyedVector<String8, String8> *headers,
+        off64_t /* offset */) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    KeyedVector<String8, String8> extHeaders;
+    if (headers != NULL) {
+        extHeaders = *headers;
+    }
+    extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+
+    bool success = mHTTPConnection->connect(uri, &extHeaders);
+
+    mLastHeaders = extHeaders;
+    mLastURI = uri;
+
+    mCachedSizeValid = false;
+
+    return success ? OK : UNKNOWN_ERROR;
+}
+
+void MediaHTTP::disconnect() {
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    mHTTPConnection->disconnect();
+}
+
+status_t MediaHTTP::initCheck() const {
+    return mInitCheck;
+}
+
+ssize_t MediaHTTP::readAt(off64_t offset, void *data, size_t size) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    int64_t startTimeUs = ALooper::GetNowUs();
+
+    size_t numBytesRead = 0;
+    while (numBytesRead < size) {
+        size_t copy = size - numBytesRead;
+
+        if (copy > 64 * 1024) {
+            // limit the buffer sizes transferred across binder boundaries
+            // to avoid spurious transaction failures.
+            copy = 64 * 1024;
+        }
+
+        ssize_t n = mHTTPConnection->readAt(
+                offset + numBytesRead, (uint8_t *)data + numBytesRead, copy);
+
+        if (n < 0) {
+            return n;
+        } else if (n == 0) {
+            break;
+        }
+
+        numBytesRead += n;
+    }
+
+    int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
+
+    addBandwidthMeasurement(numBytesRead, delayUs);
+
+    return numBytesRead;
+}
+
+status_t MediaHTTP::getSize(off64_t *size) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    // Caching the returned size so that it stays valid even after a
+    // disconnect. NuCachedSource2 relies on this.
+
+    if (!mCachedSizeValid) {
+        mCachedSize = mHTTPConnection->getSize();
+        mCachedSizeValid = true;
+    }
+
+    *size = mCachedSize;
+
+    return *size < 0 ? *size : OK;
+}
+
+uint32_t MediaHTTP::flags() {
+    return kWantsPrefetching | kIsHTTPBasedSource;
+}
+
+status_t MediaHTTP::reconnectAtOffset(off64_t offset) {
+    return connect(mLastURI.c_str(), &mLastHeaders, offset);
+}
+
+// DRM...
+
+sp<DecryptHandle> MediaHTTP::DrmInitialization(const char* mime) {
+    if (mDrmManagerClient == NULL) {
+        mDrmManagerClient = new DrmManagerClient();
+    }
+
+    if (mDrmManagerClient == NULL) {
+        return NULL;
+    }
+
+    if (mDecryptHandle == NULL) {
+        mDecryptHandle = mDrmManagerClient->openDecryptSession(
+                String8(mLastURI.c_str()), mime);
+    }
+
+    if (mDecryptHandle == NULL) {
+        delete mDrmManagerClient;
+        mDrmManagerClient = NULL;
+    }
+
+    return mDecryptHandle;
+}
+
+void MediaHTTP::getDrmInfo(
+        sp<DecryptHandle> &handle, DrmManagerClient **client) {
+    handle = mDecryptHandle;
+    *client = mDrmManagerClient;
+}
+
+String8 MediaHTTP::getUri() {
+    String8 uri;
+    if (OK == mHTTPConnection->getUri(&uri)) {
+        return uri;
+    }
+    return String8(mLastURI.c_str());
+}
+
+String8 MediaHTTP::getMIMEType() const {
+    if (mInitCheck != OK) {
+        return String8("application/octet-stream");
+    }
+
+    String8 mimeType;
+    status_t err = mHTTPConnection->getMIMEType(&mimeType);
+
+    if (err != OK) {
+        return String8("application/octet-stream");
+    }
+
+    return mimeType;
+}
+
+void MediaHTTP::clearDRMState_l() {
+    if (mDecryptHandle != NULL) {
+        // To release mDecryptHandle
+        CHECK(mDrmManagerClient);
+        mDrmManagerClient->closeDecryptSession(mDecryptHandle);
+        mDecryptHandle = NULL;
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index f3529f9..e8d558c 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -13,6 +13,8 @@
 	$(TOP)/frameworks/native/include/media/openmax \
 	$(TOP)/external/openssl/include
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
         libcrypto \
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 6d48ab7..08a146f 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -27,6 +27,8 @@
 #include "mpeg2ts/AnotherPacketSource.h"
 
 #include <cutils/properties.h>
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -34,6 +36,7 @@
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaHTTP.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
 
@@ -47,17 +50,13 @@
 namespace android {
 
 LiveSession::LiveSession(
-        const sp<AMessage> &notify, uint32_t flags, bool uidValid, uid_t uid)
+        const sp<AMessage> &notify, uint32_t flags,
+        const sp<IMediaHTTPService> &httpService)
     : mNotify(notify),
       mFlags(flags),
-      mUIDValid(uidValid),
-      mUID(uid),
+      mHTTPService(httpService),
       mInPreparationPhase(true),
-      mHTTPDataSource(
-              HTTPBase::Create(
-                  (mFlags & kFlagIncognito)
-                    ? HTTPBase::kFlagIncognito
-                    : 0)),
+      mHTTPDataSource(new MediaHTTP(mHTTPService->makeHTTPConnection())),
       mPrevBandwidthIndex(-1),
       mStreamMask(0),
       mNewStreamMask(0),
@@ -70,9 +69,6 @@
       mSwitchInProgress(false),
       mDisconnectReplyID(0),
       mSeekReplyID(0) {
-    if (mUIDValid) {
-        mHTTPDataSource->setUID(mUID);
-    }
 
     mStreams[kAudioIndex] = StreamItem("audio");
     mStreams[kVideoIndex] = StreamItem("video");
@@ -481,11 +477,8 @@
         headers = NULL;
     }
 
-#if 1
-    ALOGI("onConnect <URL suppressed>");
-#else
-    ALOGI("onConnect %s", url.c_str());
-#endif
+    // TODO currently we don't know if we are coming here from incognito mode
+    ALOGI("onConnect %s", uriDebugString(url).c_str());
 
     mMasterURL = url;
 
@@ -493,7 +486,7 @@
     mPlaylist = fetchPlaylist(url.c_str(), NULL /* curPlaylistHash */, &dummy);
 
     if (mPlaylist == NULL) {
-        ALOGE("unable to fetch master playlist <URL suppressed>.");
+        ALOGE("unable to fetch master playlist %s.", uriDebugString(url).c_str());
 
         postPrepared(ERROR_IO);
         return;
@@ -680,7 +673,7 @@
 
     ssize_t bytesRead = 0;
     // adjust range_length if only reading partial block
-    if (block_size > 0 && (range_length == -1 || buffer->size() + block_size < range_length)) {
+    if (block_size > 0 && (range_length == -1 || (int64_t)(buffer->size() + block_size) < range_length)) {
         range_length = buffer->size() + block_size;
     }
     for (;;) {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 3f8fee5..d7ed56f 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -28,6 +28,7 @@
 struct AnotherPacketSource;
 struct DataSource;
 struct HTTPBase;
+struct IMediaHTTPService;
 struct LiveDataSource;
 struct M3UParser;
 struct PlaylistFetcher;
@@ -40,7 +41,8 @@
     };
     LiveSession(
             const sp<AMessage> &notify,
-            uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
+            uint32_t flags,
+            const sp<IMediaHTTPService> &httpService);
 
     enum StreamIndex {
         kAudioIndex    = 0,
@@ -134,8 +136,7 @@
 
     sp<AMessage> mNotify;
     uint32_t mFlags;
-    bool mUIDValid;
-    uid_t mUID;
+    sp<IMediaHTTPService> mHTTPService;
 
     bool mInPreparationPhase;
 
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 20c3a76..785c515 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -170,14 +170,14 @@
             ALOGE("track %zu already selected", index);
             return BAD_VALUE;
         }
-        ALOGV("selected track %d", index);
+        ALOGV("selected track %zu", index);
         mSelectedIndex = index;
     } else {
         if (mSelectedIndex != (ssize_t)index) {
             ALOGE("track %zu is not selected", index);
             return BAD_VALUE;
         }
-        ALOGV("unselected track %d", index);
+        ALOGV("unselected track %zu", index);
         mSelectedIndex = -1;
     }
 
@@ -798,7 +798,8 @@
                 if (MakeURL(baseURI.c_str(), val.c_str(), &absURI)) {
                     val = absURI;
                 } else {
-                    ALOGE("failed to make absolute url for <URL suppressed>.");
+                    ALOGE("failed to make absolute url for %s.",
+                            uriDebugString(baseURI).c_str());
                 }
             }
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 513f114..c34f3cb 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -317,7 +317,7 @@
         maxDelayUs = minDelayUs;
     }
     if (delayUs > maxDelayUs) {
-        ALOGV("Need to refresh playlist in %lld", maxDelayUs);
+        ALOGV("Need to refresh playlist in %" PRId64 , maxDelayUs);
         delayUs = maxDelayUs;
     }
     sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
@@ -628,7 +628,7 @@
 
             int64_t bufferedStreamDurationUs =
                 mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
-            ALOGV("buffered %lld for stream %d",
+            ALOGV("buffered %" PRId64 " for stream %d",
                     bufferedStreamDurationUs, mPacketSources.keyAt(i));
             if (bufferedStreamDurationUs > bufferedDurationUs) {
                 bufferedDurationUs = bufferedStreamDurationUs;
@@ -641,7 +641,7 @@
     if (!mPrepared && bufferedDurationUs > targetDurationUs && downloadMore) {
         mPrepared = true;
 
-        ALOGV("prepared, buffered=%lld > %lld",
+        ALOGV("prepared, buffered=%" PRId64 " > %" PRId64 "",
                 bufferedDurationUs, targetDurationUs);
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatTemporarilyDoneFetching);
@@ -649,7 +649,7 @@
     }
 
     if (finalResult == OK && downloadMore) {
-        ALOGV("monitoring, buffered=%lld < %lld",
+        ALOGV("monitoring, buffered=%" PRId64 " < %" PRId64 "",
                 bufferedDurationUs, durationToBufferUs);
         // delay the next download slightly; hopefully this gives other concurrent fetchers
         // a better chance to run.
@@ -665,7 +665,7 @@
         msg->post();
 
         int64_t delayUs = mPrepared ? kMaxMonitorDelayUs : targetDurationUs / 2;
-        ALOGV("pausing for %lld, buffered=%lld > %lld",
+        ALOGV("pausing for %" PRId64 ", buffered=%" PRId64 " > %" PRId64 "",
                 delayUs, bufferedDurationUs, durationToBufferUs);
         // :TRICKY: need to enforce minimum delay because the delay to
         // refresh the playlist will become 0
@@ -739,7 +739,7 @@
 
         if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
             mSeqNumber = getSeqNumberForTime(mStartTimeUs);
-            ALOGV("Initial sequence number for time %lld is %ld from (%ld .. %ld)",
+            ALOGV("Initial sequence number for time %" PRId64 " is %d from (%d .. %d)",
                     mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         } else {
@@ -748,7 +748,7 @@
             if (mSeqNumber < firstSeqNumberInPlaylist) {
                 mSeqNumber = firstSeqNumberInPlaylist;
             }
-            ALOGV("Initial sequence number for live event %ld from (%ld .. %ld)",
+            ALOGV("Initial sequence number for live event %d from (%d .. %d)",
                     mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         }
@@ -772,7 +772,8 @@
                 if (delayUs > kMaxMonitorDelayUs) {
                     delayUs = kMaxMonitorDelayUs;
                 }
-                ALOGV("sequence number high: %ld from (%ld .. %ld), monitor in %lld (retry=%d)",
+                ALOGV("sequence number high: %d from (%d .. %d), "
+                      "monitor in %" PRId64 " (retry=%d)",
                         mSeqNumber, firstSeqNumberInPlaylist,
                         lastSeqNumberInPlaylist, delayUs, mNumRetries);
                 postMonitorQueue(delayUs);
@@ -915,6 +916,7 @@
 
         if (err == -EAGAIN) {
             // bad starting sequence number hint
+            mTSParser.clear();
             postMonitorQueue();
             return;
         }
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index bf6f7bb..2194c38 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -4,6 +4,8 @@
 LOCAL_SRC_FILES := \
 	ID3.cpp
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := libstagefright_id3
 
 include $(BUILD_STATIC_LIBRARY)
@@ -15,6 +17,8 @@
 LOCAL_SRC_FILES := \
 	testid3.cpp
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright libutils liblog libbinder libstagefright_foundation
 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 1199c22..7f221a0 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -468,49 +468,6 @@
     }
 }
 
-static void convertISO8859ToString8(
-        const uint8_t *data, size_t size,
-        String8 *s) {
-    size_t utf8len = 0;
-    for (size_t i = 0; i < size; ++i) {
-        if (data[i] == '\0') {
-            size = i;
-            break;
-        } else if (data[i] < 0x80) {
-            ++utf8len;
-        } else {
-            utf8len += 2;
-        }
-    }
-
-    if (utf8len == size) {
-        // Only ASCII characters present.
-
-        s->setTo((const char *)data, size);
-        return;
-    }
-
-    char *tmp = new char[utf8len];
-    char *ptr = tmp;
-    for (size_t i = 0; i < size; ++i) {
-        if (data[i] == '\0') {
-            break;
-        } else if (data[i] < 0x80) {
-            *ptr++ = data[i];
-        } else if (data[i] < 0xc0) {
-            *ptr++ = 0xc2;
-            *ptr++ = data[i];
-        } else {
-            *ptr++ = 0xc3;
-            *ptr++ = data[i] - 64;
-        }
-    }
-
-    s->setTo(tmp, utf8len);
-
-    delete[] tmp;
-    tmp = NULL;
-}
 
 // the 2nd argument is used to get the data following the \0 in a comment field
 void ID3::Iterator::getString(String8 *id, String8 *comment) const {
@@ -543,7 +500,9 @@
             return;
         }
 
-        convertISO8859ToString8(frameData, mFrameSize, id);
+        // this is supposed to be ISO-8859-1, but pass it up as-is to the caller, who will figure
+        // out the real encoding
+        id->setTo((const char*)frameData, mFrameSize);
         return;
     }
 
@@ -561,13 +520,13 @@
     }
 
     if (encoding == 0x00) {
-        // ISO 8859-1
-        convertISO8859ToString8(frameData + 1, n, id);
+        // supposedly ISO 8859-1
+        id->setTo((const char*)frameData + 1, n);
     } else if (encoding == 0x03) {
-        // UTF-8
+        // supposedly UTF-8
         id->setTo((const char *)(frameData + 1), n);
     } else if (encoding == 0x02) {
-        // UTF-16 BE, no byte order mark.
+        // supposedly UTF-16 BE, no byte order mark.
         // API wants number of characters, not number of bytes...
         int len = n / 2;
         const char16_t *framedata = (const char16_t *) (frameData + 1);
@@ -583,7 +542,7 @@
         if (framedatacopy != NULL) {
             delete[] framedatacopy;
         }
-    } else {
+    } else if (encoding == 0x01) {
         // UCS-2
         // API wants number of characters, not number of bytes...
         int len = n / 2;
@@ -602,7 +561,27 @@
             framedata++;
             len--;
         }
-        id->setTo(framedata, len);
+
+        // check if the resulting data consists entirely of 8-bit values
+        bool eightBit = true;
+        for (int i = 0; i < len; i++) {
+            if (framedata[i] > 0xff) {
+                eightBit = false;
+                break;
+            }
+        }
+        if (eightBit) {
+            // collapse to 8 bit, then let the media scanner client figure out the real encoding
+            char *frame8 = new char[len];
+            for (int i = 0; i < len; i++) {
+                frame8[i] = framedata[i];
+            }
+            id->setTo(frame8, len);
+            delete [] frame8;
+        } else {
+            id->setTo(framedata, len);
+        }
+
         if (framedatacopy != NULL) {
             delete[] framedatacopy;
         }
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 271df8e..a81bbba 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -63,6 +63,7 @@
     void setUID(uid_t uid);
 
     status_t setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *uri,
             const KeyedVector<String8, String8> *headers = NULL);
 
@@ -159,6 +160,7 @@
     SystemTimeSource mSystemTimeSource;
     TimeSource *mTimeSource;
 
+    sp<IMediaHTTPService> mHTTPService;
     String8 mUri;
     KeyedVector<String8, String8> mUriHeaders;
 
@@ -247,6 +249,7 @@
     sp<MediaExtractor> mExtractor;
 
     status_t setDataSource_l(
+            const sp<IMediaHTTPService> &httpService,
             const char *uri,
             const KeyedVector<String8, String8> *headers = NULL);
 
diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h
deleted file mode 100644
index da188dd..0000000
--- a/media/libstagefright/include/ChromiumHTTPDataSource.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CHROME_HTTP_DATA_SOURCE_H_
-
-#define CHROME_HTTP_DATA_SOURCE_H_
-
-#include <media/stagefright/foundation/AString.h>
-#include <utils/threads.h>
-
-#include "HTTPBase.h"
-
-namespace android {
-
-struct SfDelegate;
-
-struct ChromiumHTTPDataSource : public HTTPBase {
-    ChromiumHTTPDataSource(uint32_t flags = 0);
-
-    virtual status_t connect(
-            const char *uri,
-            const KeyedVector<String8, String8> *headers = NULL,
-            off64_t offset = 0);
-
-    virtual void disconnect();
-
-    virtual status_t initCheck() const;
-
-    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
-    virtual status_t getSize(off64_t *size);
-    virtual uint32_t flags();
-
-    virtual sp<DecryptHandle> DrmInitialization(const char *mime);
-
-    virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
-
-    virtual String8 getUri();
-
-    virtual String8 getMIMEType() const;
-
-    virtual status_t reconnectAtOffset(off64_t offset);
-
-    static status_t UpdateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList);
-
-protected:
-    virtual ~ChromiumHTTPDataSource();
-
-private:
-    friend struct SfDelegate;
-
-    enum State {
-        DISCONNECTED,
-        CONNECTING,
-        CONNECTED,
-        READING,
-        DISCONNECTING
-    };
-
-    const uint32_t mFlags;
-
-    mutable Mutex mLock;
-    Condition mCondition;
-
-    State mState;
-
-    SfDelegate *mDelegate;
-
-    AString mURI;
-    KeyedVector<String8, String8> mHeaders;
-
-    off64_t mCurrentOffset;
-
-    // Any connection error or the result of a read operation
-    // (for the lattter this is the number of bytes read, if successful).
-    ssize_t mIOResult;
-
-    int64_t mContentSize;
-
-    String8 mContentType;
-
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient *mDrmManagerClient;
-
-    void disconnect_l();
-
-    status_t connect_l(
-            const char *uri,
-            const KeyedVector<String8, String8> *headers,
-            off64_t offset);
-
-    static void InitiateRead(
-            ChromiumHTTPDataSource *me, void *data, size_t size);
-
-    void initiateRead(void *data, size_t size);
-
-    void onConnectionEstablished(
-            int64_t contentSize, const char *contentType);
-
-    void onConnectionFailed(status_t err);
-    void onReadCompleted(ssize_t size);
-    void onDisconnectComplete();
-    void onRedirect(const char *url);
-
-    void clearDRMState_l();
-
-    DISALLOW_EVIL_CONSTRUCTORS(ChromiumHTTPDataSource);
-};
-
-}  // namespace android
-
-#endif  // CHROME_HTTP_DATA_SOURCE_H_
diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
deleted file mode 100644
index dbe02b8..0000000
--- a/media/libstagefright/include/FragmentedMP4Parser.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef PARSER_H_
-
-#define PARSER_H_
-
-#include <media/stagefright/foundation/AHandler.h>
-#include <media/stagefright/DataSource.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct ABuffer;
-
-struct FragmentedMP4Parser : public AHandler {
-    struct Source : public RefBase {
-        Source() {}
-
-        virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
-        virtual bool isSeekable() = 0;
-
-        protected:
-        virtual ~Source() {}
-
-        private:
-        DISALLOW_EVIL_CONSTRUCTORS(Source);
-    };
-
-    FragmentedMP4Parser();
-
-    void start(const char *filename);
-    void start(const sp<Source> &source);
-    void start(sp<DataSource> &source);
-
-    sp<AMessage> getFormat(bool audio, bool synchronous = false);
-    status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit, bool synchronous = false);
-    status_t seekTo(bool audio, int64_t timeUs);
-    bool isSeekable() const;
-
-    virtual void onMessageReceived(const sp<AMessage> &msg);
-
-protected:
-    virtual ~FragmentedMP4Parser();
-
-private:
-    enum {
-        kWhatStart,
-        kWhatProceed,
-        kWhatReadMore,
-        kWhatGetFormat,
-        kWhatDequeueAccessUnit,
-        kWhatSeekTo,
-    };
-
-    struct TrackFragment;
-    struct DynamicTrackFragment;
-    struct StaticTrackFragment;
-
-    struct DispatchEntry {
-        uint32_t mType;
-        uint32_t mParentType;
-        status_t (FragmentedMP4Parser::*mHandler)(uint32_t, size_t, uint64_t);
-    };
-
-    struct Container {
-        uint64_t mOffset;
-        uint64_t mBytesRemaining;
-        uint32_t mType;
-        bool mExtendsToEOF;
-    };
-
-    struct SampleDescription {
-        uint32_t mType;
-        uint16_t mDataRefIndex;
-
-        sp<AMessage> mFormat;
-    };
-
-    struct SampleInfo {
-        off64_t mOffset;
-        size_t mSize;
-        uint32_t mPresentationTime;
-        size_t mSampleDescIndex;
-        uint32_t mFlags;
-    };
-
-    struct MediaDataInfo {
-        sp<ABuffer> mBuffer;
-        off64_t mOffset;
-    };
-
-    struct SidxEntry {
-        size_t mSize;
-        uint32_t mDurationUs;
-    };
-
-    struct TrackInfo {
-        enum Flags {
-            kTrackEnabled     = 0x01,
-            kTrackInMovie     = 0x02,
-            kTrackInPreview   = 0x04,
-        };
-
-        uint32_t mTrackID;
-        uint32_t mFlags;
-        uint32_t mDuration;  // This is the duration in terms of movie timescale!
-        uint64_t mSidxDuration; // usec, from sidx box, which can use a different timescale
-
-        uint32_t mMediaTimeScale;
-
-        uint32_t mMediaHandlerType;
-        Vector<SampleDescription> mSampleDescs;
-
-        // from track extends:
-        uint32_t mDefaultSampleDescriptionIndex;
-        uint32_t mDefaultSampleDuration;
-        uint32_t mDefaultSampleSize;
-        uint32_t mDefaultSampleFlags;
-
-        uint32_t mDecodingTime;
-
-        Vector<SidxEntry> mSidx;
-        sp<StaticTrackFragment> mStaticFragment;
-        List<sp<TrackFragment> > mFragments;
-    };
-
-    struct TrackFragmentHeaderInfo {
-        enum Flags {
-            kBaseDataOffsetPresent         = 0x01,
-            kSampleDescriptionIndexPresent = 0x02,
-            kDefaultSampleDurationPresent  = 0x08,
-            kDefaultSampleSizePresent      = 0x10,
-            kDefaultSampleFlagsPresent     = 0x20,
-            kDurationIsEmpty               = 0x10000,
-        };
-
-        uint32_t mTrackID;
-        uint32_t mFlags;
-        uint64_t mBaseDataOffset;
-        uint32_t mSampleDescriptionIndex;
-        uint32_t mDefaultSampleDuration;
-        uint32_t mDefaultSampleSize;
-        uint32_t mDefaultSampleFlags;
-
-        uint64_t mDataOffset;
-    };
-
-    static const DispatchEntry kDispatchTable[];
-
-    sp<Source> mSource;
-    off_t mBufferPos;
-    bool mSuspended;
-    bool mDoneWithMoov;
-    off_t mFirstMoofOffset; // used as the starting point for offsets calculated from the sidx box
-    sp<ABuffer> mBuffer;
-    Vector<Container> mStack;
-    KeyedVector<uint32_t, TrackInfo> mTracks;  // TrackInfo by trackID
-    Vector<MediaDataInfo> mMediaData;
-
-    uint32_t mCurrentTrackID;
-
-    status_t mFinalResult;
-
-    TrackFragmentHeaderInfo mTrackFragmentHeaderInfo;
-
-    status_t onProceed();
-    status_t onDequeueAccessUnit(size_t trackIndex, sp<ABuffer> *accessUnit);
-    status_t onSeekTo(bool wantAudio, int64_t position);
-
-    void enter(off64_t offset, uint32_t type, uint64_t size);
-
-    uint16_t readU16(size_t offset);
-    uint32_t readU32(size_t offset);
-    uint64_t readU64(size_t offset);
-    void skip(off_t distance);
-    status_t need(size_t size);
-    bool fitsContainer(uint64_t size) const;
-
-    status_t parseTrackHeader(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseMediaHeader(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseMediaHandler(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseTrackExtends(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseTrackFragmentHeader(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseTrackFragmentRun(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseVisualSampleEntry(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseAudioSampleEntry(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseSampleSizes(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseCompactSampleSizes(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseSampleToChunk(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseChunkOffsets(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseChunkOffsets64(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseAVCCodecSpecificData(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseESDSCodecSpecificData(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseMediaData(
-            uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseSegmentIndex(
-            uint32_t type, size_t offset, uint64_t size);
-
-    TrackInfo *editTrack(uint32_t trackID, bool createIfNecessary = false);
-
-    ssize_t findTrack(bool wantAudio) const;
-
-    status_t makeAccessUnit(
-            TrackInfo *info,
-            const SampleInfo &sample,
-            const MediaDataInfo &mdatInfo,
-            sp<ABuffer> *accessUnit);
-
-    status_t getSample(
-            TrackInfo *info,
-            sp<TrackFragment> *fragment,
-            SampleInfo *sampleInfo);
-
-    static int CompareSampleLocation(
-        const SampleInfo &sample, const MediaDataInfo &mdatInfo);
-
-    void resumeIfNecessary();
-
-    void copyBuffer(
-            sp<ABuffer> *dst,
-            size_t offset, uint64_t size) const;
-
-    DISALLOW_EVIL_CONSTRUCTORS(FragmentedMP4Parser);
-};
-
-}  // namespace android
-
-#endif  // PARSER_H_
-
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h
index d4b7f9f..1c3cd5e 100644
--- a/media/libstagefright/include/HTTPBase.h
+++ b/media/libstagefright/include/HTTPBase.h
@@ -48,14 +48,6 @@
 
     virtual status_t setBandwidthStatCollectFreq(int32_t freqMs);
 
-    static status_t UpdateProxyConfig(
-            const char *host, int32_t port, const char *exclusionList);
-
-    void setUID(uid_t uid);
-    bool getUID(uid_t *uid) const;
-
-    static sp<HTTPBase> Create(uint32_t flags = 0);
-
     static void RegisterSocketUserTag(int sockfd, uid_t uid, uint32_t kTag);
     static void UnRegisterSocketUserTag(int sockfd);
 
@@ -87,9 +79,6 @@
     int32_t mPrevEstimatedBandWidthKbps;
     int32_t mBandWidthCollectFreqMs;
 
-    bool mUIDValid;
-    uid_t mUID;
-
     DISALLOW_EVIL_CONSTRUCTORS(HTTPBase);
 };
 
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/include/SDPLoader.h
index ca59dc0..2c4f543 100644
--- a/media/libstagefright/include/SDPLoader.h
+++ b/media/libstagefright/include/SDPLoader.h
@@ -25,6 +25,7 @@
 namespace android {
 
 struct HTTPBase;
+struct IMediaHTTPService;
 
 struct SDPLoader : public AHandler {
     enum Flags {
@@ -34,7 +35,10 @@
     enum {
         kWhatSDPLoaded = 'sdpl'
     };
-    SDPLoader(const sp<AMessage> &notify, uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
+    SDPLoader(
+            const sp<AMessage> &notify,
+            uint32_t flags,
+            const sp<IMediaHTTPService> &httpService);
 
     void load(const char* url, const KeyedVector<String8, String8> *headers);
 
@@ -55,8 +59,6 @@
     sp<AMessage> mNotify;
     const char* mUrl;
     uint32_t mFlags;
-    bool mUIDValid;
-    uid_t mUID;
     sp<ALooper> mNetLooper;
     bool mCancelled;
 
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index b02ed0e..6632c27 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -33,6 +33,7 @@
     virtual ~StagefrightMetadataRetriever();
 
     virtual status_t setDataSource(
+            const sp<IMediaHTTPService> &httpService,
             const char *url,
             const KeyedVector<String8, String8> *headers);
 
diff --git a/media/libstagefright/include/chromium_http_stub.h b/media/libstagefright/include/chromium_http_stub.h
deleted file mode 100644
index e0651a4..0000000
--- a/media/libstagefright/include/chromium_http_stub.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CHROMIUM_HTTP_STUB_H_
-#define CHROMIUM_HTTP_STUB_H_
-
-#include <include/HTTPBase.h>
-#include <media/stagefright/DataSource.h>
-
-namespace android {
-extern "C" {
-HTTPBase *createChromiumHTTPDataSource(uint32_t flags);
-
-status_t UpdateChromiumHTTPDataSourceProxyConfig(
-        const char *host, int32_t port, const char *exclusionList);
-
-DataSource *createDataUriSource(const char *uri);
-}
-}
-
-#endif
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 0e4dd2b..d7bec59 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -315,7 +315,7 @@
 
     *actualFrameTimeUs = -1ll;
 
-    const int64_t seekTimeNs = seekTimeUs * 1000ll;
+    const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
 
     mkvparser::Segment* const pSegment = mExtractor->mSegment;
 
@@ -630,7 +630,8 @@
       mReader(new DataSourceReader(mDataSource)),
       mSegment(NULL),
       mExtractedThumbnails(false),
-      mIsWebm(false) {
+      mIsWebm(false),
+      mSeekPreRollNs(0) {
     off64_t size;
     mIsLiveStreaming =
         (mDataSource->flags()
@@ -656,14 +657,22 @@
         return;
     }
 
+    // from mkvparser::Segment::Load(), but stop at first cluster
     ret = mSegment->ParseHeaders();
-    CHECK_EQ(ret, 0);
-
-    long len;
-    ret = mSegment->LoadCluster(pos, len);
-    CHECK_EQ(ret, 0);
+    if (ret == 0) {
+        long len;
+        ret = mSegment->LoadCluster(pos, len);
+        if (ret >= 1) {
+            // no more clusters
+            ret = 0;
+        }
+    } else if (ret > 0) {
+        ret = mkvparser::E_BUFFER_NOT_FULL;
+    }
 
     if (ret < 0) {
+        ALOGW("Corrupt %s source: %s", mIsWebm ? "webm" : "matroska",
+                uriDebugString(mDataSource->getUri()).c_str());
         delete mSegment;
         mSegment = NULL;
         return;
@@ -921,6 +930,12 @@
 
                     err = addVorbisCodecInfo(
                             meta, codecPrivate, codecPrivateSize);
+                } else if (!strcmp("A_OPUS", codecID)) {
+                    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_OPUS);
+                    meta->setData(kKeyOpusHeader, 0, codecPrivate, codecPrivateSize);
+                    meta->setInt64(kKeyOpusCodecDelay, track->GetCodecDelay());
+                    meta->setInt64(kKeyOpusSeekPreRoll, track->GetSeekPreRoll());
+                    mSeekPreRollNs = track->GetSeekPreRoll();
                 } else if (!strcmp("A_MPEG/L3", codecID)) {
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
                 } else {
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index 1294b4f..cf200f3 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -69,6 +69,7 @@
     bool mExtractedThumbnails;
     bool mIsLiveStreaming;
     bool mIsWebm;
+    int64_t mSeekPreRollNs;
 
     void addTracks();
     void findThumbnails();
diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
deleted file mode 100644
index 0102656..0000000
--- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp
+++ /dev/null
@@ -1,1993 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "FragmentedMP4Parser"
-#include <utils/Log.h>
-
-#include "include/avc_utils.h"
-#include "include/ESDS.h"
-#include "include/FragmentedMP4Parser.h"
-#include "TrackFragment.h"
-
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-
-
-namespace android {
-
-static const char *Fourcc2String(uint32_t fourcc) {
-    static char buffer[5];
-    buffer[4] = '\0';
-    buffer[0] = fourcc >> 24;
-    buffer[1] = (fourcc >> 16) & 0xff;
-    buffer[2] = (fourcc >> 8) & 0xff;
-    buffer[3] = fourcc & 0xff;
-
-    return buffer;
-}
-
-static const char *IndentString(size_t n) {
-    static const char kSpace[] = "                              ";
-    return kSpace + sizeof(kSpace) - 2 * n - 1;
-}
-
-// static
-const FragmentedMP4Parser::DispatchEntry FragmentedMP4Parser::kDispatchTable[] = {
-    { FOURCC('m', 'o', 'o', 'v'), 0, NULL },
-    { FOURCC('t', 'r', 'a', 'k'), FOURCC('m', 'o', 'o', 'v'), NULL },
-    { FOURCC('u', 'd', 't', 'a'), FOURCC('t', 'r', 'a', 'k'), NULL },
-    { FOURCC('u', 'd', 't', 'a'), FOURCC('m', 'o', 'o', 'v'), NULL },
-    { FOURCC('m', 'e', 't', 'a'), FOURCC('u', 'd', 't', 'a'), NULL },
-    { FOURCC('i', 'l', 's', 't'), FOURCC('m', 'e', 't', 'a'), NULL },
-
-    { FOURCC('t', 'k', 'h', 'd'), FOURCC('t', 'r', 'a', 'k'),
-        &FragmentedMP4Parser::parseTrackHeader
-    },
-
-    { FOURCC('m', 'v', 'e', 'x'), FOURCC('m', 'o', 'o', 'v'), NULL },
-
-    { FOURCC('t', 'r', 'e', 'x'), FOURCC('m', 'v', 'e', 'x'),
-        &FragmentedMP4Parser::parseTrackExtends
-    },
-
-    { FOURCC('e', 'd', 't', 's'), FOURCC('t', 'r', 'a', 'k'), NULL },
-    { FOURCC('m', 'd', 'i', 'a'), FOURCC('t', 'r', 'a', 'k'), NULL },
-
-    { FOURCC('m', 'd', 'h', 'd'), FOURCC('m', 'd', 'i', 'a'),
-        &FragmentedMP4Parser::parseMediaHeader
-    },
-
-    { FOURCC('h', 'd', 'l', 'r'), FOURCC('m', 'd', 'i', 'a'),
-        &FragmentedMP4Parser::parseMediaHandler
-    },
-
-    { FOURCC('m', 'i', 'n', 'f'), FOURCC('m', 'd', 'i', 'a'), NULL },
-    { FOURCC('d', 'i', 'n', 'f'), FOURCC('m', 'i', 'n', 'f'), NULL },
-    { FOURCC('s', 't', 'b', 'l'), FOURCC('m', 'i', 'n', 'f'), NULL },
-    { FOURCC('s', 't', 's', 'd'), FOURCC('s', 't', 'b', 'l'), NULL },
-
-    { FOURCC('s', 't', 's', 'z'), FOURCC('s', 't', 'b', 'l'),
-        &FragmentedMP4Parser::parseSampleSizes },
-
-    { FOURCC('s', 't', 'z', '2'), FOURCC('s', 't', 'b', 'l'),
-        &FragmentedMP4Parser::parseCompactSampleSizes },
-
-    { FOURCC('s', 't', 's', 'c'), FOURCC('s', 't', 'b', 'l'),
-        &FragmentedMP4Parser::parseSampleToChunk },
-
-    { FOURCC('s', 't', 'c', 'o'), FOURCC('s', 't', 'b', 'l'),
-        &FragmentedMP4Parser::parseChunkOffsets },
-
-    { FOURCC('c', 'o', '6', '4'), FOURCC('s', 't', 'b', 'l'),
-        &FragmentedMP4Parser::parseChunkOffsets64 },
-
-    { FOURCC('a', 'v', 'c', 'C'), FOURCC('a', 'v', 'c', '1'),
-        &FragmentedMP4Parser::parseAVCCodecSpecificData },
-
-    { FOURCC('e', 's', 'd', 's'), FOURCC('m', 'p', '4', 'a'),
-        &FragmentedMP4Parser::parseESDSCodecSpecificData },
-
-    { FOURCC('e', 's', 'd', 's'), FOURCC('m', 'p', '4', 'v'),
-        &FragmentedMP4Parser::parseESDSCodecSpecificData },
-
-    { FOURCC('m', 'd', 'a', 't'), 0, &FragmentedMP4Parser::parseMediaData },
-
-    { FOURCC('m', 'o', 'o', 'f'), 0, NULL },
-    { FOURCC('t', 'r', 'a', 'f'), FOURCC('m', 'o', 'o', 'f'), NULL },
-
-    { FOURCC('t', 'f', 'h', 'd'), FOURCC('t', 'r', 'a', 'f'),
-        &FragmentedMP4Parser::parseTrackFragmentHeader
-    },
-    { FOURCC('t', 'r', 'u', 'n'), FOURCC('t', 'r', 'a', 'f'),
-        &FragmentedMP4Parser::parseTrackFragmentRun
-    },
-
-    { FOURCC('m', 'f', 'r', 'a'), 0, NULL },
-
-    { FOURCC('s', 'i', 'd', 'x'), 0, &FragmentedMP4Parser::parseSegmentIndex },
-};
-
-struct FileSource : public FragmentedMP4Parser::Source {
-    FileSource(const char *filename)
-        : mFile(fopen(filename, "rb")) {
-            CHECK(mFile != NULL);
-        }
-
-    virtual ~FileSource() {
-        fclose(mFile);
-    }
-
-    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
-        fseek(mFile, offset, SEEK_SET);
-        return fread(data, 1, size, mFile);
-    }
-
-    virtual bool isSeekable() {
-        return true;
-    }
-
-    private:
-    FILE *mFile;
-
-    DISALLOW_EVIL_CONSTRUCTORS(FileSource);
-};
-
-struct ReadTracker : public RefBase {
-    ReadTracker(off64_t size) {
-        allocSize = 1 + size / 8192; // 1 bit per kilobyte
-        bitmap = (char*) calloc(1, allocSize);
-    }
-    virtual ~ReadTracker() {
-        dumpToLog();
-        free(bitmap);
-    }
-    void mark(off64_t offset, size_t size) {
-        int firstbit = offset / 1024;
-        int lastbit = (offset + size - 1) / 1024;
-        for (int i = firstbit; i <= lastbit; i++) {
-            bitmap[i/8] |= (0x80 >> (i & 7));
-        }
-    }
-
- private:
-    void dumpToLog() {
-        // 96 chars per line, each char represents one kilobyte, 1 kb per bit
-        int numlines = allocSize / 12;
-        char buf[97];
-        char *cur = bitmap;
-        for (int i = 0; i < numlines; i++ && cur) {
-            for (int j = 0; j < 12; j++) {
-                for (int k = 0; k < 8; k++) {
-                    buf[(j * 8) + k] = (*cur & (0x80 >> k)) ? 'X' : '.';
-                }
-                cur++;
-            }
-            buf[96] = '\0';
-            ALOGI("%5dk: %s", i * 96, buf);
-        }
-    }
-
-    size_t allocSize;
-    char *bitmap;
-};
-
-struct DataSourceSource : public FragmentedMP4Parser::Source {
-    DataSourceSource(sp<DataSource> &source)
-        : mDataSource(source) {
-            CHECK(mDataSource != NULL);
-#if 0
-            off64_t size;
-            if (source->getSize(&size) == OK) {
-                mReadTracker = new ReadTracker(size);
-            } else {
-                ALOGE("couldn't get data source size");
-            }
-#endif
-        }
-
-    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
-        if (mReadTracker != NULL) {
-            mReadTracker->mark(offset, size);
-        }
-        return mDataSource->readAt(offset, data, size);
-    }
-
-    virtual bool isSeekable() {
-        return true;
-    }
-
-    private:
-    sp<DataSource> mDataSource;
-    sp<ReadTracker> mReadTracker;
-
-    DISALLOW_EVIL_CONSTRUCTORS(DataSourceSource);
-};
-
-FragmentedMP4Parser::FragmentedMP4Parser()
-    : mBufferPos(0),
-      mSuspended(false),
-      mDoneWithMoov(false),
-      mFirstMoofOffset(0),
-      mFinalResult(OK) {
-}
-
-FragmentedMP4Parser::~FragmentedMP4Parser() {
-}
-
-void FragmentedMP4Parser::start(const char *filename) {
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
-    msg->setObject("source", new FileSource(filename));
-    msg->post();
-    ALOGV("Parser::start(%s)", filename);
-}
-
-void FragmentedMP4Parser::start(const sp<Source> &source) {
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
-    msg->setObject("source", source);
-    msg->post();
-    ALOGV("Parser::start(Source)");
-}
-
-void FragmentedMP4Parser::start(sp<DataSource> &source) {
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
-    msg->setObject("source", new DataSourceSource(source));
-    msg->post();
-    ALOGV("Parser::start(DataSource)");
-}
-
-sp<AMessage> FragmentedMP4Parser::getFormat(bool audio, bool synchronous) {
-
-    while (true) {
-        bool moovDone = mDoneWithMoov;
-        sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
-        msg->setInt32("audio", audio);
-
-        sp<AMessage> response;
-        status_t err = msg->postAndAwaitResponse(&response);
-
-        if (err != OK) {
-            ALOGV("getFormat post failed: %d", err);
-            return NULL;
-        }
-
-        if (response->findInt32("err", &err) && err != OK) {
-            if (synchronous && err == -EWOULDBLOCK && !moovDone) {
-                resumeIfNecessary();
-                ALOGV("@getFormat parser not ready yet, retrying");
-                usleep(10000);
-                continue;
-            }
-            ALOGV("getFormat failed: %d", err);
-            return NULL;
-        }
-
-        sp<AMessage> format;
-        CHECK(response->findMessage("format", &format));
-
-        ALOGV("returning format %s", format->debugString().c_str());
-        return format;
-    }
-}
-
-status_t FragmentedMP4Parser::seekTo(bool wantAudio, int64_t timeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSeekTo, id());
-    msg->setInt32("audio", wantAudio);
-    msg->setInt64("position", timeUs);
-
-    sp<AMessage> response;
-    status_t err = msg->postAndAwaitResponse(&response);
-    return err;
-}
-
-bool FragmentedMP4Parser::isSeekable() const {
-    while (mFirstMoofOffset == 0 && mFinalResult == OK) {
-        usleep(10000);
-    }
-    bool seekable = mSource->isSeekable();
-    for (size_t i = 0; seekable && i < mTracks.size(); i++) {
-        const TrackInfo *info = &mTracks.valueAt(i);
-        seekable &= !info->mSidx.empty();
-    }
-    return seekable;
-}
-
-status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) {
-    status_t err = -EINVAL;
-    ssize_t trackIndex = findTrack(wantAudio);
-    if (trackIndex < 0) {
-        err = trackIndex;
-    } else {
-        TrackInfo *info = &mTracks.editValueAt(trackIndex);
-
-        int numSidxEntries = info->mSidx.size();
-        int64_t totalTime = 0;
-        off_t totalOffset = mFirstMoofOffset;
-        for (int i = 0; i < numSidxEntries; i++) {
-            const SidxEntry *se = &info->mSidx[i];
-            if (totalTime + se->mDurationUs > position) {
-                mBuffer->setRange(0,0);
-                mBufferPos = totalOffset;
-                if (mFinalResult == ERROR_END_OF_STREAM) {
-                    mFinalResult = OK;
-                    mSuspended = true; // force resume
-                    resumeIfNecessary();
-                }
-                info->mFragments.clear();
-                info->mDecodingTime = totalTime * info->mMediaTimeScale / 1000000ll;
-                return OK;
-            }
-            totalTime += se->mDurationUs;
-            totalOffset += se->mSize;
-        }
-    }
-    ALOGV("seekTo out of range");
-    return err;
-}
-
-status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit,
-                                                bool synchronous) {
-
-    while (true) {
-        sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
-        msg->setInt32("audio", audio);
-
-        sp<AMessage> response;
-        status_t err = msg->postAndAwaitResponse(&response);
-
-        if (err != OK) {
-            ALOGV("dequeue fail 1: %d", err);
-            return err;
-        }
-
-        if (response->findInt32("err", &err) && err != OK) {
-            if (synchronous && err == -EWOULDBLOCK) {
-                resumeIfNecessary();
-                ALOGV("Parser not ready yet, retrying");
-                usleep(10000);
-                continue;
-            }
-            ALOGV("dequeue fail 2: %d, %d", err, synchronous);
-            return err;
-        }
-
-        CHECK(response->findBuffer("accessUnit", accessUnit));
-
-        return OK;
-    }
-}
-
-ssize_t FragmentedMP4Parser::findTrack(bool wantAudio) const {
-    for (size_t i = 0; i < mTracks.size(); ++i) {
-        const TrackInfo *info = &mTracks.valueAt(i);
-
-        bool isAudio =
-            info->mMediaHandlerType == FOURCC('s', 'o', 'u', 'n');
-
-        bool isVideo =
-            info->mMediaHandlerType == FOURCC('v', 'i', 'd', 'e');
-
-        if ((wantAudio && isAudio) || (!wantAudio && !isAudio)) {
-            if (info->mSampleDescs.empty()) {
-                break;
-            }
-
-            return i;
-        }
-    }
-
-    return -EWOULDBLOCK;
-}
-
-void FragmentedMP4Parser::onMessageReceived(const sp<AMessage> &msg) {
-    switch (msg->what()) {
-        case kWhatStart:
-        {
-            sp<RefBase> obj;
-            CHECK(msg->findObject("source", &obj));
-
-            mSource = static_cast<Source *>(obj.get());
-
-            mBuffer = new ABuffer(512 * 1024);
-            mBuffer->setRange(0, 0);
-
-            enter(0ll, 0, 0);
-
-            (new AMessage(kWhatProceed, id()))->post();
-            break;
-        }
-
-        case kWhatProceed:
-        {
-            CHECK(!mSuspended);
-
-            status_t err = onProceed();
-
-            if (err == OK) {
-                if (!mSuspended) {
-                    msg->post();
-                }
-            } else if (err != -EAGAIN) {
-                ALOGE("onProceed returned error %d", err);
-            }
-
-            break;
-        }
-
-        case kWhatReadMore:
-        {
-            size_t needed;
-            CHECK(msg->findSize("needed", &needed));
-
-            memmove(mBuffer->base(), mBuffer->data(), mBuffer->size());
-            mBufferPos += mBuffer->offset();
-            mBuffer->setRange(0, mBuffer->size());
-
-            size_t maxBytesToRead = mBuffer->capacity() - mBuffer->size();
-
-            if (maxBytesToRead < needed) {
-                ALOGV("resizing buffer.");
-
-                sp<ABuffer> newBuffer =
-                    new ABuffer((mBuffer->size() + needed + 1023) & ~1023);
-                memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
-                newBuffer->setRange(0, mBuffer->size());
-
-                mBuffer = newBuffer;
-                maxBytesToRead = mBuffer->capacity() - mBuffer->size();
-            }
-
-            CHECK_GE(maxBytesToRead, needed);
-
-            ssize_t n = mSource->readAt(
-                    mBufferPos + mBuffer->size(),
-                    mBuffer->data() + mBuffer->size(), needed);
-
-            if (n < (ssize_t)needed) {
-                ALOGV("Reached EOF when reading %d @ %d + %d", needed, mBufferPos, mBuffer->size());
-                if (n < 0) {
-                    mFinalResult = n;
-                } else if (n == 0) {
-                    mFinalResult = ERROR_END_OF_STREAM;
-                } else {
-                    mFinalResult = ERROR_IO;
-                }
-            } else {
-                mBuffer->setRange(0, mBuffer->size() + n);
-                (new AMessage(kWhatProceed, id()))->post();
-            }
-
-            break;
-        }
-
-        case kWhatGetFormat:
-        {
-            int32_t wantAudio;
-            CHECK(msg->findInt32("audio", &wantAudio));
-
-            status_t err = -EWOULDBLOCK;
-            sp<AMessage> response = new AMessage;
-
-            ssize_t trackIndex = findTrack(wantAudio);
-
-            if (trackIndex < 0) {
-                err = trackIndex;
-            } else {
-                TrackInfo *info = &mTracks.editValueAt(trackIndex);
-
-                sp<AMessage> format = info->mSampleDescs.itemAt(0).mFormat;
-                if (info->mSidxDuration) {
-                    format->setInt64("durationUs", info->mSidxDuration);
-                } else {
-                    // this is probably going to be zero. Oh well...
-                    format->setInt64("durationUs",
-                                     1000000ll * info->mDuration / info->mMediaTimeScale);
-                }
-                response->setMessage(
-                        "format", format);
-
-                err = OK;
-            }
-
-            response->setInt32("err", err);
-
-            uint32_t replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-
-            response->postReply(replyID);
-            break;
-        }
-
-        case kWhatDequeueAccessUnit:
-        {
-            int32_t wantAudio;
-            CHECK(msg->findInt32("audio", &wantAudio));
-
-            status_t err = -EWOULDBLOCK;
-            sp<AMessage> response = new AMessage;
-
-            ssize_t trackIndex = findTrack(wantAudio);
-
-            if (trackIndex < 0) {
-                err = trackIndex;
-            } else {
-                sp<ABuffer> accessUnit;
-                err = onDequeueAccessUnit(trackIndex, &accessUnit);
-
-                if (err == OK) {
-                    response->setBuffer("accessUnit", accessUnit);
-                }
-            }
-
-            response->setInt32("err", err);
-
-            uint32_t replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-
-            response->postReply(replyID);
-            break;
-        }
-
-        case kWhatSeekTo:
-        {
-            ALOGV("kWhatSeekTo");
-            int32_t wantAudio;
-            CHECK(msg->findInt32("audio", &wantAudio));
-            int64_t position;
-            CHECK(msg->findInt64("position", &position));
-
-            status_t err = -EWOULDBLOCK;
-            sp<AMessage> response = new AMessage;
-
-            ssize_t trackIndex = findTrack(wantAudio);
-
-            if (trackIndex < 0) {
-                err = trackIndex;
-            } else {
-                err = onSeekTo(wantAudio, position);
-            }
-            response->setInt32("err", err);
-            uint32_t replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-            response->postReply(replyID);
-            break;
-        }
-        default:
-            TRESPASS();
-    }
-}
-
-status_t FragmentedMP4Parser::onProceed() {
-    status_t err;
-
-    if ((err = need(8)) != OK) {
-        return err;
-    }
-
-    uint64_t size = readU32(0);
-    uint32_t type = readU32(4);
-
-    size_t offset = 8;
-
-    if (size == 1) {
-        if ((err = need(16)) != OK) {
-            return err;
-        }
-
-        size = readU64(offset);
-        offset += 8;
-    }
-
-    uint8_t userType[16];
-
-    if (type == FOURCC('u', 'u', 'i', 'd')) {
-        if ((err = need(offset + 16)) != OK) {
-            return err;
-        }
-
-        memcpy(userType, mBuffer->data() + offset, 16);
-        offset += 16;
-    }
-
-    CHECK(!mStack.isEmpty());
-    uint32_t ptype = mStack.itemAt(mStack.size() - 1).mType;
-
-    static const size_t kNumDispatchers =
-        sizeof(kDispatchTable) / sizeof(kDispatchTable[0]);
-
-    size_t i;
-    for (i = 0; i < kNumDispatchers; ++i) {
-        if (kDispatchTable[i].mType == type
-                && kDispatchTable[i].mParentType == ptype) {
-            break;
-        }
-    }
-
-    // SampleEntry boxes are container boxes that start with a variable
-    // amount of data depending on the media handler type.
-    // We don't look inside 'hint' type SampleEntry boxes.
-
-    bool isSampleEntryBox =
-        (ptype == FOURCC('s', 't', 's', 'd'))
-        && editTrack(mCurrentTrackID)->mMediaHandlerType
-        != FOURCC('h', 'i', 'n', 't');
-
-    if ((i < kNumDispatchers && kDispatchTable[i].mHandler == 0)
-            || isSampleEntryBox || ptype == FOURCC('i', 'l', 's', 't')) {
-        // This is a container box.
-        if (type == FOURCC('m', 'o', 'o', 'f')) {
-            if (mFirstMoofOffset == 0) {
-                ALOGV("first moof @ %08x", mBufferPos + offset);
-                mFirstMoofOffset = mBufferPos + offset - 8; // point at the size
-            }
-        }
-        if (type == FOURCC('m', 'e', 't', 'a')) {
-            if ((err = need(offset + 4)) < OK) {
-                return err;
-            }
-
-            if (readU32(offset) != 0) {
-                return -EINVAL;
-            }
-
-            offset += 4;
-        } else if (type == FOURCC('s', 't', 's', 'd')) {
-            if ((err = need(offset + 8)) < OK) {
-                return err;
-            }
-
-            if (readU32(offset) != 0) {
-                return -EINVAL;
-            }
-
-            if (readU32(offset + 4) == 0) {
-                // We need at least some entries.
-                return -EINVAL;
-            }
-
-            offset += 8;
-        } else if (isSampleEntryBox) {
-            size_t headerSize;
-
-            switch (editTrack(mCurrentTrackID)->mMediaHandlerType) {
-                case FOURCC('v', 'i', 'd', 'e'):
-                {
-                    // 8 bytes SampleEntry + 70 bytes VisualSampleEntry
-                    headerSize = 78;
-                    break;
-                }
-
-                case FOURCC('s', 'o', 'u', 'n'):
-                {
-                    // 8 bytes SampleEntry + 20 bytes AudioSampleEntry
-                    headerSize = 28;
-                    break;
-                }
-
-                case FOURCC('m', 'e', 't', 'a'):
-                {
-                    headerSize = 8;  // 8 bytes SampleEntry
-                    break;
-                }
-
-                default:
-                    TRESPASS();
-            }
-
-            if (offset + headerSize > size) {
-                return -EINVAL;
-            }
-
-            if ((err = need(offset + headerSize)) != OK) {
-                return err;
-            }
-
-            switch (editTrack(mCurrentTrackID)->mMediaHandlerType) {
-                case FOURCC('v', 'i', 'd', 'e'):
-                {
-                    err = parseVisualSampleEntry(
-                            type, offset, offset + headerSize);
-                    break;
-                }
-
-                case FOURCC('s', 'o', 'u', 'n'):
-                {
-                    err = parseAudioSampleEntry(
-                            type, offset, offset + headerSize);
-                    break;
-                }
-
-                case FOURCC('m', 'e', 't', 'a'):
-                {
-                    err = OK;
-                    break;
-                }
-
-                default:
-                    TRESPASS();
-            }
-
-            if (err != OK) {
-                return err;
-            }
-
-            offset += headerSize;
-        }
-
-        skip(offset);
-
-        ALOGV("%sentering box of type '%s'",
-                IndentString(mStack.size()), Fourcc2String(type));
-
-        enter(mBufferPos - offset, type, size - offset);
-    } else {
-        if (!fitsContainer(size)) {
-            return -EINVAL;
-        }
-
-        if (i < kNumDispatchers && kDispatchTable[i].mHandler != 0) {
-            // We have a handler for this box type.
-
-            if ((err = need(size)) != OK) {
-                return err;
-            }
-
-            ALOGV("%sparsing box of type '%s'",
-                    IndentString(mStack.size()), Fourcc2String(type));
-
-            if ((err = (this->*kDispatchTable[i].mHandler)(
-                            type, offset, size)) != OK) {
-                return err;
-            }
-        } else {
-            // Unknown box type
-
-            ALOGV("%sskipping box of type '%s', size %llu",
-                    IndentString(mStack.size()),
-                    Fourcc2String(type), size);
-
-        }
-
-        skip(size);
-    }
-
-    return OK;
-}
-
-// static
-int FragmentedMP4Parser::CompareSampleLocation(
-        const SampleInfo &sample, const MediaDataInfo &mdatInfo) {
-    if (sample.mOffset + sample.mSize < mdatInfo.mOffset) {
-        return -1;
-    }
-
-    if (sample.mOffset >= mdatInfo.mOffset + mdatInfo.mBuffer->size()) {
-        return 1;
-    }
-
-    // Otherwise make sure the sample is completely contained within this
-    // media data block.
-
-    CHECK_GE(sample.mOffset, mdatInfo.mOffset);
-
-    CHECK_LE(sample.mOffset + sample.mSize,
-             mdatInfo.mOffset + mdatInfo.mBuffer->size());
-
-    return 0;
-}
-
-void FragmentedMP4Parser::resumeIfNecessary() {
-    if (!mSuspended) {
-        return;
-    }
-
-    ALOGV("resuming.");
-
-    mSuspended = false;
-    (new AMessage(kWhatProceed, id()))->post();
-}
-
-status_t FragmentedMP4Parser::getSample(
-        TrackInfo *info, sp<TrackFragment> *fragment, SampleInfo *sampleInfo) {
-    for (;;) {
-        if (info->mFragments.empty()) {
-            if (mFinalResult != OK) {
-                return mFinalResult;
-            }
-
-            resumeIfNecessary();
-            return -EWOULDBLOCK;
-        }
-
-        *fragment = *info->mFragments.begin();
-
-        status_t err = (*fragment)->getSample(sampleInfo);
-
-        if (err == OK) {
-            return OK;
-        } else if (err != ERROR_END_OF_STREAM) {
-            return err;
-        }
-
-        // Really, end of this fragment...
-
-        info->mFragments.erase(info->mFragments.begin());
-    }
-}
-
-status_t FragmentedMP4Parser::onDequeueAccessUnit(
-        size_t trackIndex, sp<ABuffer> *accessUnit) {
-    TrackInfo *info = &mTracks.editValueAt(trackIndex);
-
-    sp<TrackFragment> fragment;
-    SampleInfo sampleInfo;
-    status_t err = getSample(info, &fragment, &sampleInfo);
-
-    if (err == -EWOULDBLOCK) {
-        resumeIfNecessary();
-        return err;
-    } else if (err != OK) {
-        return err;
-    }
-
-    err = -EWOULDBLOCK;
-
-    bool checkDroppable = false;
-
-    for (size_t i = 0; i < mMediaData.size(); ++i) {
-        const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
-
-        int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
-
-        if (cmp < 0 && !mSource->isSeekable()) {
-            return -EPIPE;
-        } else if (cmp == 0) {
-            if (i > 0) {
-                checkDroppable = true;
-            }
-
-            err = makeAccessUnit(info, sampleInfo, mdatInfo, accessUnit);
-            break;
-        }
-    }
-
-    if (err != OK) {
-        return err;
-    }
-
-    fragment->advance();
-
-    if (!mMediaData.empty() && checkDroppable) {
-        size_t numDroppable = 0;
-        bool done = false;
-
-        // XXX FIXME: if one of the tracks is not advanced (e.g. if you play an audio+video
-        // file with sf2), then mMediaData will not be pruned and keeps growing
-        for (size_t i = 0; !done && i < mMediaData.size(); ++i) {
-            const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
-
-            for (size_t j = 0; j < mTracks.size(); ++j) {
-                TrackInfo *info = &mTracks.editValueAt(j);
-
-                sp<TrackFragment> fragment;
-                SampleInfo sampleInfo;
-                err = getSample(info, &fragment, &sampleInfo);
-
-                if (err != OK) {
-                    done = true;
-                    break;
-                }
-
-                int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
-
-                if (cmp <= 0) {
-                    done = true;
-                    break;
-                }
-            }
-
-            if (!done) {
-                ++numDroppable;
-            }
-        }
-
-        if (numDroppable > 0) {
-            mMediaData.removeItemsAt(0, numDroppable);
-
-            if (mMediaData.size() < 5) {
-                resumeIfNecessary();
-            }
-        }
-    }
-
-    return err;
-}
-
-static size_t parseNALSize(size_t nalLengthSize, const uint8_t *data) {
-    switch (nalLengthSize) {
-        case 1:
-            return *data;
-        case 2:
-            return U16_AT(data);
-        case 3:
-            return ((size_t)data[0] << 16) | U16_AT(&data[1]);
-        case 4:
-            return U32_AT(data);
-    }
-
-    // This cannot happen, mNALLengthSize springs to life by adding 1 to
-    // a 2-bit integer.
-    TRESPASS();
-
-    return 0;
-}
-
-status_t FragmentedMP4Parser::makeAccessUnit(
-        TrackInfo *info,
-        const SampleInfo &sample,
-        const MediaDataInfo &mdatInfo,
-        sp<ABuffer> *accessUnit) {
-    if (sample.mSampleDescIndex < 1
-            || sample.mSampleDescIndex > info->mSampleDescs.size()) {
-        return ERROR_MALFORMED;
-    }
-
-    int64_t presentationTimeUs =
-        1000000ll * sample.mPresentationTime / info->mMediaTimeScale;
-
-    const SampleDescription &sampleDesc =
-        info->mSampleDescs.itemAt(sample.mSampleDescIndex - 1);
-
-    size_t nalLengthSize;
-    if (!sampleDesc.mFormat->findSize("nal-length-size", &nalLengthSize)) {
-        *accessUnit = new ABuffer(sample.mSize);
-
-        memcpy((*accessUnit)->data(),
-               mdatInfo.mBuffer->data() + (sample.mOffset - mdatInfo.mOffset),
-               sample.mSize);
-
-        (*accessUnit)->meta()->setInt64("timeUs", presentationTimeUs);
-        if (IsIDR(*accessUnit)) {
-            (*accessUnit)->meta()->setInt32("is-sync-frame", 1);
-        }
-
-        return OK;
-    }
-
-    const uint8_t *srcPtr =
-        mdatInfo.mBuffer->data() + (sample.mOffset - mdatInfo.mOffset);
-
-    for (int i = 0; i < 2 ; ++i) {
-        size_t srcOffset = 0;
-        size_t dstOffset = 0;
-
-        while (srcOffset < sample.mSize) {
-            if (srcOffset + nalLengthSize > sample.mSize) {
-                return ERROR_MALFORMED;
-            }
-
-            size_t nalSize = parseNALSize(nalLengthSize, &srcPtr[srcOffset]);
-            srcOffset += nalLengthSize;
-
-            if (srcOffset + nalSize > sample.mSize) {
-                return ERROR_MALFORMED;
-            }
-
-            if (i == 1) {
-                memcpy((*accessUnit)->data() + dstOffset,
-                       "\x00\x00\x00\x01",
-                       4);
-
-                memcpy((*accessUnit)->data() + dstOffset + 4,
-                       srcPtr + srcOffset,
-                       nalSize);
-            }
-
-            srcOffset += nalSize;
-            dstOffset += nalSize + 4;
-        }
-
-        if (i == 0) {
-            (*accessUnit) = new ABuffer(dstOffset);
-            (*accessUnit)->meta()->setInt64(
-                    "timeUs", presentationTimeUs);
-        }
-    }
-    if (IsIDR(*accessUnit)) {
-        (*accessUnit)->meta()->setInt32("is-sync-frame", 1);
-    }
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::need(size_t size) {
-    if (!fitsContainer(size)) {
-        return -EINVAL;
-    }
-
-    if (size <= mBuffer->size()) {
-        return OK;
-    }
-
-    sp<AMessage> msg = new AMessage(kWhatReadMore, id());
-    msg->setSize("needed", size - mBuffer->size());
-    msg->post();
-
-    // ALOGV("need(%d) returning -EAGAIN, only have %d", size, mBuffer->size());
-
-    return -EAGAIN;
-}
-
-void FragmentedMP4Parser::enter(off64_t offset, uint32_t type, uint64_t size) {
-    Container container;
-    container.mOffset = offset;
-    container.mType = type;
-    container.mExtendsToEOF = (size == 0);
-    container.mBytesRemaining = size;
-
-    mStack.push(container);
-}
-
-bool FragmentedMP4Parser::fitsContainer(uint64_t size) const {
-    CHECK(!mStack.isEmpty());
-    const Container &container = mStack.itemAt(mStack.size() - 1);
-
-    return container.mExtendsToEOF || size <= container.mBytesRemaining;
-}
-
-uint16_t FragmentedMP4Parser::readU16(size_t offset) {
-    CHECK_LE(offset + 2, mBuffer->size());
-
-    const uint8_t *ptr = mBuffer->data() + offset;
-    return (ptr[0] << 8) | ptr[1];
-}
-
-uint32_t FragmentedMP4Parser::readU32(size_t offset) {
-    CHECK_LE(offset + 4, mBuffer->size());
-
-    const uint8_t *ptr = mBuffer->data() + offset;
-    return (ptr[0] << 24) | (ptr[1] << 16) | (ptr[2] << 8) | ptr[3];
-}
-
-uint64_t FragmentedMP4Parser::readU64(size_t offset) {
-    return (((uint64_t)readU32(offset)) << 32) | readU32(offset + 4);
-}
-
-void FragmentedMP4Parser::skip(off_t distance) {
-    CHECK(!mStack.isEmpty());
-    for (size_t i = mStack.size(); i-- > 0;) {
-        Container *container = &mStack.editItemAt(i);
-        if (!container->mExtendsToEOF) {
-            CHECK_LE(distance, (off_t)container->mBytesRemaining);
-
-            container->mBytesRemaining -= distance;
-
-            if (container->mBytesRemaining == 0) {
-                ALOGV("%sleaving box of type '%s'",
-                        IndentString(mStack.size() - 1),
-                        Fourcc2String(container->mType));
-
-#if 0
-                if (container->mType == FOURCC('s', 't', 's', 'd')) {
-                    TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-                    for (size_t i = 0;
-                            i < trackInfo->mSampleDescs.size(); ++i) {
-                        ALOGI("format #%d: %s",
-                              i,
-                              trackInfo->mSampleDescs.itemAt(i)
-                                .mFormat->debugString().c_str());
-                    }
-                }
-#endif
-
-                if (container->mType == FOURCC('s', 't', 'b', 'l')) {
-                    TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
-                    trackInfo->mStaticFragment->signalCompletion();
-
-                    CHECK(trackInfo->mFragments.empty());
-                    trackInfo->mFragments.push_back(trackInfo->mStaticFragment);
-                    trackInfo->mStaticFragment.clear();
-                } else if (container->mType == FOURCC('t', 'r', 'a', 'f')) {
-                    TrackInfo *trackInfo =
-                        editTrack(mTrackFragmentHeaderInfo.mTrackID);
-
-                    const sp<TrackFragment> &fragment =
-                        *--trackInfo->mFragments.end();
-
-                    static_cast<DynamicTrackFragment *>(
-                            fragment.get())->signalCompletion();
-                } else if (container->mType == FOURCC('m', 'o', 'o', 'v')) {
-                    mDoneWithMoov = true;
-                }
-
-                container = NULL;
-                mStack.removeItemsAt(i);
-            }
-        }
-    }
-
-    if (distance < (off_t)mBuffer->size()) {
-        mBuffer->setRange(mBuffer->offset() + distance, mBuffer->size() - distance);
-        mBufferPos += distance;
-        return;
-    }
-
-    mBuffer->setRange(0, 0);
-    mBufferPos += distance;
-}
-
-status_t FragmentedMP4Parser::parseTrackHeader(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 4 > size) {
-        return -EINVAL;
-    }
-
-    uint32_t flags = readU32(offset);
-
-    uint32_t version = flags >> 24;
-    flags &= 0xffffff;
-
-    uint32_t trackID;
-    uint64_t duration;
-
-    if (version == 1) {
-        if (offset + 36 > size) {
-            return -EINVAL;
-        }
-
-        trackID = readU32(offset + 20);
-        duration = readU64(offset + 28);
-
-        offset += 36;
-    } else if (version == 0) {
-        if (offset + 24 > size) {
-            return -EINVAL;
-        }
-
-        trackID = readU32(offset + 12);
-        duration = readU32(offset + 20);
-
-        offset += 24;
-    } else {
-        return -EINVAL;
-    }
-
-    TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
-    info->mFlags = flags;
-    info->mDuration = duration;
-    if (info->mDuration == 0xffffffff) {
-        // ffmpeg sets this to -1, which is incorrect.
-        info->mDuration = 0;
-    }
-
-    info->mStaticFragment = new StaticTrackFragment;
-
-    mCurrentTrackID = trackID;
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseMediaHeader(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 4 > size) {
-        return -EINVAL;
-    }
-
-    uint32_t versionAndFlags = readU32(offset);
-
-    if (versionAndFlags & 0xffffff) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t version = versionAndFlags >> 24;
-
-    TrackInfo *info = editTrack(mCurrentTrackID);
-
-    if (version == 1) {
-        if (offset + 4 + 32 > size) {
-            return -EINVAL;
-        }
-        info->mMediaTimeScale = U32_AT(mBuffer->data() + offset + 20);
-    } else if (version == 0) {
-        if (offset + 4 + 20 > size) {
-            return -EINVAL;
-        }
-        info->mMediaTimeScale = U32_AT(mBuffer->data() + offset + 12);
-    } else {
-        return ERROR_MALFORMED;
-    }
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseMediaHandler(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 12 > size) {
-        return -EINVAL;
-    }
-
-    if (readU32(offset) != 0) {
-        return -EINVAL;
-    }
-
-    uint32_t handlerType = readU32(offset + 8);
-
-    switch (handlerType) {
-        case FOURCC('v', 'i', 'd', 'e'):
-        case FOURCC('s', 'o', 'u', 'n'):
-        case FOURCC('h', 'i', 'n', 't'):
-        case FOURCC('m', 'e', 't', 'a'):
-            break;
-
-        default:
-            return -EINVAL;
-    }
-
-    editTrack(mCurrentTrackID)->mMediaHandlerType = handlerType;
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseVisualSampleEntry(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 78 > size) {
-        return -EINVAL;
-    }
-
-    TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
-    trackInfo->mSampleDescs.push();
-    SampleDescription *sampleDesc =
-        &trackInfo->mSampleDescs.editItemAt(
-                trackInfo->mSampleDescs.size() - 1);
-
-    sampleDesc->mType = type;
-    sampleDesc->mDataRefIndex = readU16(offset + 6);
-
-    sp<AMessage> format = new AMessage;
-
-    switch (type) {
-        case FOURCC('a', 'v', 'c', '1'):
-            format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
-            break;
-        case FOURCC('m', 'p', '4', 'v'):
-            format->setString("mime", MEDIA_MIMETYPE_VIDEO_MPEG4);
-            break;
-        case FOURCC('s', '2', '6', '3'):
-        case FOURCC('h', '2', '6', '3'):
-        case FOURCC('H', '2', '6', '3'):
-            format->setString("mime", MEDIA_MIMETYPE_VIDEO_H263);
-            break;
-        default:
-            format->setString("mime", "application/octet-stream");
-            break;
-    }
-
-    format->setInt32("width", readU16(offset + 8 + 16));
-    format->setInt32("height", readU16(offset + 8 + 18));
-
-    sampleDesc->mFormat = format;
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseAudioSampleEntry(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 28 > size) {
-        return -EINVAL;
-    }
-
-    TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
-    trackInfo->mSampleDescs.push();
-    SampleDescription *sampleDesc =
-        &trackInfo->mSampleDescs.editItemAt(
-                trackInfo->mSampleDescs.size() - 1);
-
-    sampleDesc->mType = type;
-    sampleDesc->mDataRefIndex = readU16(offset + 6);
-
-    sp<AMessage> format = new AMessage;
-
-    format->setInt32("channel-count", readU16(offset + 8 + 8));
-    format->setInt32("sample-size", readU16(offset + 8 + 10));
-    format->setInt32("sample-rate", readU32(offset + 8 + 16) / 65536.0f);
-
-    switch (type) {
-        case FOURCC('m', 'p', '4', 'a'):
-            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
-            break;
-
-        case FOURCC('s', 'a', 'm', 'r'):
-            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
-            format->setInt32("channel-count", 1);
-            format->setInt32("sample-rate", 8000);
-            break;
-
-        case FOURCC('s', 'a', 'w', 'b'):
-            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_WB);
-            format->setInt32("channel-count", 1);
-            format->setInt32("sample-rate", 16000);
-            break;
-        default:
-            format->setString("mime", "application/octet-stream");
-            break;
-    }
-
-    sampleDesc->mFormat = format;
-
-    return OK;
-}
-
-static void addCodecSpecificData(
-        const sp<AMessage> &format, int32_t index,
-        const void *data, size_t size,
-        bool insertStartCode = false) {
-    sp<ABuffer> csd = new ABuffer(insertStartCode ? size + 4 : size);
-
-    memcpy(csd->data() + (insertStartCode ? 4 : 0), data, size);
-
-    if (insertStartCode) {
-        memcpy(csd->data(), "\x00\x00\x00\x01", 4);
-    }
-
-    csd->meta()->setInt32("csd", true);
-    csd->meta()->setInt64("timeUs", 0ll);
-
-    format->setBuffer(StringPrintf("csd-%d", index).c_str(), csd);
-}
-
-status_t FragmentedMP4Parser::parseSampleSizes(
-        uint32_t type, size_t offset, uint64_t size) {
-    return editTrack(mCurrentTrackID)->mStaticFragment->parseSampleSizes(
-            this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseCompactSampleSizes(
-        uint32_t type, size_t offset, uint64_t size) {
-    return editTrack(mCurrentTrackID)->mStaticFragment->parseCompactSampleSizes(
-            this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseSampleToChunk(
-        uint32_t type, size_t offset, uint64_t size) {
-    return editTrack(mCurrentTrackID)->mStaticFragment->parseSampleToChunk(
-            this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseChunkOffsets(
-        uint32_t type, size_t offset, uint64_t size) {
-    return editTrack(mCurrentTrackID)->mStaticFragment->parseChunkOffsets(
-            this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseChunkOffsets64(
-        uint32_t type, size_t offset, uint64_t size) {
-    return editTrack(mCurrentTrackID)->mStaticFragment->parseChunkOffsets64(
-            this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseAVCCodecSpecificData(
-        uint32_t type, size_t offset, uint64_t size) {
-    TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
-    SampleDescription *sampleDesc =
-        &trackInfo->mSampleDescs.editItemAt(
-                trackInfo->mSampleDescs.size() - 1);
-
-    if (sampleDesc->mType != FOURCC('a', 'v', 'c', '1')) {
-        return -EINVAL;
-    }
-
-    const uint8_t *ptr = mBuffer->data() + offset;
-
-    size -= offset;
-    offset = 0;
-
-    if (size < 7 || ptr[0] != 0x01) {
-        return ERROR_MALFORMED;
-    }
-
-    sampleDesc->mFormat->setSize("nal-length-size", 1 + (ptr[4] & 3));
-
-    size_t numSPS = ptr[5] & 31;
-
-    ptr += 6;
-    size -= 6;
-
-    for (size_t i = 0; i < numSPS; ++i) {
-        if (size < 2) {
-            return ERROR_MALFORMED;
-        }
-
-        size_t length = U16_AT(ptr);
-
-        ptr += 2;
-        size -= 2;
-
-        if (size < length) {
-            return ERROR_MALFORMED;
-        }
-
-        addCodecSpecificData(
-                sampleDesc->mFormat, i, ptr, length,
-                true /* insertStartCode */);
-
-        ptr += length;
-        size -= length;
-    }
-
-    if (size < 1) {
-        return ERROR_MALFORMED;
-    }
-
-    size_t numPPS = *ptr;
-    ++ptr;
-    --size;
-
-    for (size_t i = 0; i < numPPS; ++i) {
-        if (size < 2) {
-            return ERROR_MALFORMED;
-        }
-
-        size_t length = U16_AT(ptr);
-
-        ptr += 2;
-        size -= 2;
-
-        if (size < length) {
-            return ERROR_MALFORMED;
-        }
-
-        addCodecSpecificData(
-                sampleDesc->mFormat, numSPS + i, ptr, length,
-                true /* insertStartCode */);
-
-        ptr += length;
-        size -= length;
-    }
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseESDSCodecSpecificData(
-        uint32_t type, size_t offset, uint64_t size) {
-    TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
-    SampleDescription *sampleDesc =
-        &trackInfo->mSampleDescs.editItemAt(
-                trackInfo->mSampleDescs.size() - 1);
-
-    if (sampleDesc->mType != FOURCC('m', 'p', '4', 'a')
-            && sampleDesc->mType != FOURCC('m', 'p', '4', 'v')) {
-        return -EINVAL;
-    }
-
-    const uint8_t *ptr = mBuffer->data() + offset;
-
-    size -= offset;
-    offset = 0;
-
-    if (size < 4) {
-        return -EINVAL;
-    }
-
-    if (U32_AT(ptr) != 0) {
-        return -EINVAL;
-    }
-
-    ptr += 4;
-    size -=4;
-
-    ESDS esds(ptr, size);
-
-    uint8_t objectTypeIndication;
-    if (esds.getObjectTypeIndication(&objectTypeIndication) != OK) {
-        return ERROR_MALFORMED;
-    }
-
-    const uint8_t *csd;
-    size_t csd_size;
-    if (esds.getCodecSpecificInfo(
-                (const void **)&csd, &csd_size) != OK) {
-        return ERROR_MALFORMED;
-    }
-
-    addCodecSpecificData(sampleDesc->mFormat, 0, csd, csd_size);
-
-    if (sampleDesc->mType != FOURCC('m', 'p', '4', 'a')) {
-        return OK;
-    }
-
-    if (csd_size == 0) {
-        // There's no further information, i.e. no codec specific data
-        // Let's assume that the information provided in the mpeg4 headers
-        // is accurate and hope for the best.
-
-        return OK;
-    }
-
-    if (csd_size < 2) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t objectType = csd[0] >> 3;
-
-    if (objectType == 31) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    uint32_t freqIndex = (csd[0] & 7) << 1 | (csd[1] >> 7);
-    int32_t sampleRate = 0;
-    int32_t numChannels = 0;
-    if (freqIndex == 15) {
-        if (csd_size < 5) {
-            return ERROR_MALFORMED;
-        }
-
-        sampleRate = (csd[1] & 0x7f) << 17
-                        | csd[2] << 9
-                        | csd[3] << 1
-                        | (csd[4] >> 7);
-
-        numChannels = (csd[4] >> 3) & 15;
-    } else {
-        static uint32_t kSamplingRate[] = {
-            96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
-            16000, 12000, 11025, 8000, 7350
-        };
-
-        if (freqIndex == 13 || freqIndex == 14) {
-            return ERROR_MALFORMED;
-        }
-
-        sampleRate = kSamplingRate[freqIndex];
-        numChannels = (csd[1] >> 3) & 15;
-    }
-
-    if (numChannels == 0) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    sampleDesc->mFormat->setInt32("sample-rate", sampleRate);
-    sampleDesc->mFormat->setInt32("channel-count", numChannels);
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseMediaData(
-        uint32_t type, size_t offset, uint64_t size) {
-    ALOGV("skipping 'mdat' chunk at offsets 0x%08lx-0x%08llx.",
-          mBufferPos + offset, mBufferPos + size);
-
-    sp<ABuffer> buffer = new ABuffer(size - offset);
-    memcpy(buffer->data(), mBuffer->data() + offset, size - offset);
-
-    mMediaData.push();
-    MediaDataInfo *info = &mMediaData.editItemAt(mMediaData.size() - 1);
-    info->mBuffer = buffer;
-    info->mOffset = mBufferPos + offset;
-
-    if (mMediaData.size() > 10) {
-        ALOGV("suspending for now.");
-        mSuspended = true;
-    }
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseSegmentIndex(
-        uint32_t type, size_t offset, uint64_t size) {
-    ALOGV("sidx box type %d, offset %d, size %d", type, int(offset), int(size));
-//    AString sidxstr;
-//    hexdump(mBuffer->data() + offset, size, 0 /* indent */, &sidxstr);
-//    ALOGV("raw sidx:");
-//    ALOGV("%s", sidxstr.c_str());
-    if (offset + 12 > size) {
-        return -EINVAL;
-    }
-
-    uint32_t flags = readU32(offset);
-
-    uint32_t version = flags >> 24;
-    flags &= 0xffffff;
-
-    ALOGV("sidx version %d", version);
-
-    uint32_t referenceId = readU32(offset + 4);
-    uint32_t timeScale = readU32(offset + 8);
-    ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
-
-    uint64_t earliestPresentationTime;
-    uint64_t firstOffset;
-
-    offset += 12;
-
-    if (version == 0) {
-        if (offset + 8 > size) {
-            return -EINVAL;
-        }
-        earliestPresentationTime = readU32(offset);
-        firstOffset = readU32(offset + 4);
-        offset += 8;
-    } else {
-        if (offset + 16 > size) {
-            return -EINVAL;
-        }
-        earliestPresentationTime = readU64(offset);
-        firstOffset = readU64(offset + 8);
-        offset += 16;
-    }
-    ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
-
-    if (offset + 4 > size) {
-        return -EINVAL;
-    }
-    if (readU16(offset) != 0) { // reserved
-        return -EINVAL;
-    }
-    int32_t referenceCount = readU16(offset + 2);
-    offset += 4;
-    ALOGV("refcount: %d", referenceCount);
-
-    if (offset + referenceCount * 12 > size) {
-        return -EINVAL;
-    }
-
-    TrackInfo *info = editTrack(mCurrentTrackID);
-    uint64_t total_duration = 0;
-    for (int i = 0; i < referenceCount; i++) {
-        uint32_t d1 = readU32(offset);
-        uint32_t d2 = readU32(offset + 4);
-        uint32_t d3 = readU32(offset + 8);
-
-        if (d1 & 0x80000000) {
-            ALOGW("sub-sidx boxes not supported yet");
-        }
-        bool sap = d3 & 0x80000000;
-        bool saptype = d3 >> 28;
-        if (!sap || saptype > 2) {
-            ALOGW("not a stream access point, or unsupported type");
-        }
-        total_duration += d2;
-        offset += 12;
-        ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
-        SidxEntry se;
-        se.mSize = d1 & 0x7fffffff;
-        se.mDurationUs = 1000000LL * d2 / timeScale;
-        info->mSidx.add(se);
-    }
-
-    info->mSidxDuration = total_duration * 1000000 / timeScale;
-    ALOGV("duration: %lld", info->mSidxDuration);
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseTrackExtends(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 24 > size) {
-        return -EINVAL;
-    }
-
-    if (readU32(offset) != 0) {
-        return -EINVAL;
-    }
-
-    uint32_t trackID = readU32(offset + 4);
-
-    TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
-    info->mDefaultSampleDescriptionIndex = readU32(offset + 8);
-    info->mDefaultSampleDuration = readU32(offset + 12);
-    info->mDefaultSampleSize = readU32(offset + 16);
-    info->mDefaultSampleFlags = readU32(offset + 20);
-
-    return OK;
-}
-
-FragmentedMP4Parser::TrackInfo *FragmentedMP4Parser::editTrack(
-        uint32_t trackID, bool createIfNecessary) {
-    ssize_t i = mTracks.indexOfKey(trackID);
-
-    if (i >= 0) {
-        return &mTracks.editValueAt(i);
-    }
-
-    if (!createIfNecessary) {
-        return NULL;
-    }
-
-    TrackInfo info;
-    info.mTrackID = trackID;
-    info.mFlags = 0;
-    info.mDuration = 0xffffffff;
-    info.mSidxDuration = 0;
-    info.mMediaTimeScale = 0;
-    info.mMediaHandlerType = 0;
-    info.mDefaultSampleDescriptionIndex = 0;
-    info.mDefaultSampleDuration = 0;
-    info.mDefaultSampleSize = 0;
-    info.mDefaultSampleFlags = 0;
-
-    info.mDecodingTime = 0;
-
-    mTracks.add(trackID, info);
-    return &mTracks.editValueAt(mTracks.indexOfKey(trackID));
-}
-
-status_t FragmentedMP4Parser::parseTrackFragmentHeader(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 8 > size) {
-        return -EINVAL;
-    }
-
-    uint32_t flags = readU32(offset);
-
-    if (flags & 0xff000000) {
-        return -EINVAL;
-    }
-
-    mTrackFragmentHeaderInfo.mFlags = flags;
-
-    mTrackFragmentHeaderInfo.mTrackID = readU32(offset + 4);
-    offset += 8;
-
-    if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) {
-        if (offset + 8 > size) {
-            return -EINVAL;
-        }
-
-        mTrackFragmentHeaderInfo.mBaseDataOffset = readU64(offset);
-        offset += 8;
-    }
-
-    if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) {
-        if (offset + 4 > size) {
-            return -EINVAL;
-        }
-
-        mTrackFragmentHeaderInfo.mSampleDescriptionIndex = readU32(offset);
-        offset += 4;
-    }
-
-    if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
-        if (offset + 4 > size) {
-            return -EINVAL;
-        }
-
-        mTrackFragmentHeaderInfo.mDefaultSampleDuration = readU32(offset);
-        offset += 4;
-    }
-
-    if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
-        if (offset + 4 > size) {
-            return -EINVAL;
-        }
-
-        mTrackFragmentHeaderInfo.mDefaultSampleSize = readU32(offset);
-        offset += 4;
-    }
-
-    if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
-        if (offset + 4 > size) {
-            return -EINVAL;
-        }
-
-        mTrackFragmentHeaderInfo.mDefaultSampleFlags = readU32(offset);
-        offset += 4;
-    }
-
-    if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) {
-        // This should point to the position of the first byte of the
-        // enclosing 'moof' container for the first track and
-        // the end of the data of the preceding fragment for subsequent
-        // tracks.
-
-        CHECK_GE(mStack.size(), 2u);
-
-        mTrackFragmentHeaderInfo.mBaseDataOffset =
-            mStack.itemAt(mStack.size() - 2).mOffset;
-
-        // XXX TODO: This does not do the right thing for the 2nd and
-        // subsequent tracks yet.
-    }
-
-    mTrackFragmentHeaderInfo.mDataOffset =
-        mTrackFragmentHeaderInfo.mBaseDataOffset;
-
-    TrackInfo *trackInfo = editTrack(mTrackFragmentHeaderInfo.mTrackID);
-
-    if (trackInfo->mFragments.empty()
-            || (*trackInfo->mFragments.begin())->complete()) {
-        trackInfo->mFragments.push_back(new DynamicTrackFragment);
-    }
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::parseTrackFragmentRun(
-        uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 8 > size) {
-        return -EINVAL;
-    }
-
-    enum {
-        kDataOffsetPresent                  = 0x01,
-        kFirstSampleFlagsPresent            = 0x04,
-        kSampleDurationPresent              = 0x100,
-        kSampleSizePresent                  = 0x200,
-        kSampleFlagsPresent                 = 0x400,
-        kSampleCompositionTimeOffsetPresent = 0x800,
-    };
-
-    uint32_t flags = readU32(offset);
-
-    if (flags & 0xff000000) {
-        return -EINVAL;
-    }
-
-    if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) {
-        // These two shall not be used together.
-        return -EINVAL;
-    }
-
-    uint32_t sampleCount = readU32(offset + 4);
-    offset += 8;
-
-    uint64_t dataOffset = mTrackFragmentHeaderInfo.mDataOffset;
-
-    uint32_t firstSampleFlags = 0;
-
-    if (flags & kDataOffsetPresent) {
-        if (offset + 4 > size) {
-            return -EINVAL;
-        }
-
-        int32_t dataOffsetDelta = (int32_t)readU32(offset);
-
-        dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta;
-
-        offset += 4;
-    }
-
-    if (flags & kFirstSampleFlagsPresent) {
-        if (offset + 4 > size) {
-            return -EINVAL;
-        }
-
-        firstSampleFlags = readU32(offset);
-        offset += 4;
-    }
-
-    TrackInfo *info = editTrack(mTrackFragmentHeaderInfo.mTrackID);
-
-    if (info == NULL) {
-        return -EINVAL;
-    }
-
-    uint32_t sampleDuration = 0, sampleSize = 0, sampleFlags = 0,
-             sampleCtsOffset = 0;
-
-    size_t bytesPerSample = 0;
-    if (flags & kSampleDurationPresent) {
-        bytesPerSample += 4;
-    } else if (mTrackFragmentHeaderInfo.mFlags
-            & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
-        sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
-    } else {
-        sampleDuration = info->mDefaultSampleDuration;
-    }
-
-    if (flags & kSampleSizePresent) {
-        bytesPerSample += 4;
-    } else if (mTrackFragmentHeaderInfo.mFlags
-            & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
-        sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
-    } else {
-        sampleSize = info->mDefaultSampleSize;
-    }
-
-    if (flags & kSampleFlagsPresent) {
-        bytesPerSample += 4;
-    } else if (mTrackFragmentHeaderInfo.mFlags
-            & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
-        sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
-    } else {
-        sampleFlags = info->mDefaultSampleFlags;
-    }
-
-    if (flags & kSampleCompositionTimeOffsetPresent) {
-        bytesPerSample += 4;
-    } else {
-        sampleCtsOffset = 0;
-    }
-
-    if (offset + sampleCount * bytesPerSample > size) {
-        return -EINVAL;
-    }
-
-    uint32_t sampleDescIndex =
-        (mTrackFragmentHeaderInfo.mFlags
-            & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent)
-            ? mTrackFragmentHeaderInfo.mSampleDescriptionIndex
-            : info->mDefaultSampleDescriptionIndex;
-
-    for (uint32_t i = 0; i < sampleCount; ++i) {
-        if (flags & kSampleDurationPresent) {
-            sampleDuration = readU32(offset);
-            offset += 4;
-        }
-
-        if (flags & kSampleSizePresent) {
-            sampleSize = readU32(offset);
-            offset += 4;
-        }
-
-        if (flags & kSampleFlagsPresent) {
-            sampleFlags = readU32(offset);
-            offset += 4;
-        }
-
-        if (flags & kSampleCompositionTimeOffsetPresent) {
-            sampleCtsOffset = readU32(offset);
-            offset += 4;
-        }
-
-        ALOGV("adding sample at offset 0x%08llx, size %u, duration %u, "
-              "sampleDescIndex=%u, flags 0x%08x",
-                dataOffset, sampleSize, sampleDuration,
-                sampleDescIndex,
-                (flags & kFirstSampleFlagsPresent) && i == 0
-                    ? firstSampleFlags : sampleFlags);
-
-        const sp<TrackFragment> &fragment = *--info->mFragments.end();
-
-        uint32_t decodingTime = info->mDecodingTime;
-        info->mDecodingTime += sampleDuration;
-        uint32_t presentationTime = decodingTime + sampleCtsOffset;
-
-        static_cast<DynamicTrackFragment *>(
-                fragment.get())->addSample(
-                    dataOffset,
-                    sampleSize,
-                    presentationTime,
-                    sampleDescIndex,
-                    ((flags & kFirstSampleFlagsPresent) && i == 0)
-                        ? firstSampleFlags : sampleFlags);
-
-        dataOffset += sampleSize;
-    }
-
-    mTrackFragmentHeaderInfo.mDataOffset = dataOffset;
-
-    return OK;
-}
-
-void FragmentedMP4Parser::copyBuffer(
-        sp<ABuffer> *dst, size_t offset, uint64_t size) const {
-    sp<ABuffer> buf = new ABuffer(size);
-    memcpy(buf->data(), mBuffer->data() + offset, size);
-
-    *dst = buf;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/mp4/TrackFragment.cpp b/media/libstagefright/mp4/TrackFragment.cpp
deleted file mode 100644
index 3699038..0000000
--- a/media/libstagefright/mp4/TrackFragment.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TrackFragment"
-#include <utils/Log.h>
-
-#include "TrackFragment.h"
-
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/hexdump.h>
-
-namespace android {
-
-FragmentedMP4Parser::DynamicTrackFragment::DynamicTrackFragment()
-    : mComplete(false),
-      mSampleIndex(0) {
-}
-
-FragmentedMP4Parser::DynamicTrackFragment::~DynamicTrackFragment() {
-}
-
-status_t FragmentedMP4Parser::DynamicTrackFragment::getSample(SampleInfo *info) {
-    if (mSampleIndex >= mSamples.size()) {
-        return mComplete ? ERROR_END_OF_STREAM : -EWOULDBLOCK;
-    }
-
-    *info = mSamples.itemAt(mSampleIndex);
-
-    return OK;
-}
-
-void FragmentedMP4Parser::DynamicTrackFragment::advance() {
-    ++mSampleIndex;
-}
-
-void FragmentedMP4Parser::DynamicTrackFragment::addSample(
-        off64_t dataOffset, size_t sampleSize,
-        uint32_t presentationTime,
-        size_t sampleDescIndex,
-        uint32_t flags) {
-    mSamples.push();
-    SampleInfo *sampleInfo = &mSamples.editItemAt(mSamples.size() - 1);
-
-    sampleInfo->mOffset = dataOffset;
-    sampleInfo->mSize = sampleSize;
-    sampleInfo->mPresentationTime = presentationTime;
-    sampleInfo->mSampleDescIndex = sampleDescIndex;
-    sampleInfo->mFlags = flags;
-}
-
-status_t FragmentedMP4Parser::DynamicTrackFragment::signalCompletion() {
-    mComplete = true;
-
-    return OK;
-}
-
-bool FragmentedMP4Parser::DynamicTrackFragment::complete() const {
-    return mComplete;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-FragmentedMP4Parser::StaticTrackFragment::StaticTrackFragment()
-    : mSampleIndex(0),
-      mSampleCount(0),
-      mChunkIndex(0),
-      mSampleToChunkIndex(-1),
-      mSampleToChunkRemaining(0),
-      mPrevChunkIndex(0xffffffff),
-      mNextSampleOffset(0) {
-}
-
-FragmentedMP4Parser::StaticTrackFragment::~StaticTrackFragment() {
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::getSample(SampleInfo *info) {
-    if (mSampleIndex >= mSampleCount) {
-        return ERROR_END_OF_STREAM;
-    }
-
-    *info = mSampleInfo;
-
-    ALOGV("returning sample %d at [0x%08llx, 0x%08llx)",
-          mSampleIndex,
-          info->mOffset, info->mOffset + info->mSize);
-
-    return OK;
-}
-
-void FragmentedMP4Parser::StaticTrackFragment::updateSampleInfo() {
-    if (mSampleIndex >= mSampleCount) {
-        return;
-    }
-
-    if (mSampleSizes != NULL) {
-        uint32_t defaultSampleSize = U32_AT(mSampleSizes->data() + 4);
-        if (defaultSampleSize > 0) {
-            mSampleInfo.mSize = defaultSampleSize;
-        } else {
-            mSampleInfo.mSize= U32_AT(mSampleSizes->data() + 12 + 4 * mSampleIndex);
-        }
-    } else {
-        CHECK(mCompactSampleSizes != NULL);
-
-        uint32_t fieldSize = U32_AT(mCompactSampleSizes->data() + 4);
-
-        switch (fieldSize) {
-            case 4:
-            {
-                unsigned byte = mCompactSampleSizes->data()[12 + mSampleIndex / 2];
-                mSampleInfo.mSize = (mSampleIndex & 1) ? byte & 0x0f : byte >> 4;
-                break;
-            }
-
-            case 8:
-            {
-                mSampleInfo.mSize = mCompactSampleSizes->data()[12 + mSampleIndex];
-                break;
-            }
-
-            default:
-            {
-                CHECK_EQ(fieldSize, 16);
-                mSampleInfo.mSize =
-                    U16_AT(mCompactSampleSizes->data() + 12 + mSampleIndex * 2);
-                break;
-            }
-        }
-    }
-
-    CHECK_GT(mSampleToChunkRemaining, 0);
-
-    // The sample desc index is 1-based... XXX
-    mSampleInfo.mSampleDescIndex =
-        U32_AT(mSampleToChunk->data() + 8 + 12 * mSampleToChunkIndex + 8);
-
-    if (mChunkIndex != mPrevChunkIndex) {
-        mPrevChunkIndex = mChunkIndex;
-
-        if (mChunkOffsets != NULL) {
-            uint32_t entryCount = U32_AT(mChunkOffsets->data() + 4);
-
-            if (mChunkIndex >= entryCount) {
-                mSampleIndex = mSampleCount;
-                return;
-            }
-
-            mNextSampleOffset =
-                U32_AT(mChunkOffsets->data() + 8 + 4 * mChunkIndex);
-        } else {
-            CHECK(mChunkOffsets64 != NULL);
-
-            uint32_t entryCount = U32_AT(mChunkOffsets64->data() + 4);
-
-            if (mChunkIndex >= entryCount) {
-                mSampleIndex = mSampleCount;
-                return;
-            }
-
-            mNextSampleOffset =
-                U64_AT(mChunkOffsets64->data() + 8 + 8 * mChunkIndex);
-        }
-    }
-
-    mSampleInfo.mOffset = mNextSampleOffset;
-
-    mSampleInfo.mPresentationTime = 0;
-    mSampleInfo.mFlags = 0;
-}
-
-void FragmentedMP4Parser::StaticTrackFragment::advance() {
-    mNextSampleOffset += mSampleInfo.mSize;
-
-    ++mSampleIndex;
-    if (--mSampleToChunkRemaining == 0) {
-        ++mChunkIndex;
-
-        uint32_t entryCount = U32_AT(mSampleToChunk->data() + 4);
-
-        // If this is the last entry in the sample to chunk table, we will
-        // stay on this entry.
-        if ((uint32_t)(mSampleToChunkIndex + 1) < entryCount) {
-            uint32_t nextChunkIndex =
-                U32_AT(mSampleToChunk->data() + 8 + 12 * (mSampleToChunkIndex + 1));
-
-            CHECK_GE(nextChunkIndex, 1u);
-            --nextChunkIndex;
-
-            if (mChunkIndex >= nextChunkIndex) {
-                CHECK_EQ(mChunkIndex, nextChunkIndex);
-                ++mSampleToChunkIndex;
-            }
-        }
-
-        mSampleToChunkRemaining =
-            U32_AT(mSampleToChunk->data() + 8 + 12 * mSampleToChunkIndex + 4);
-    }
-
-    updateSampleInfo();
-}
-
-static void setU32At(uint8_t *ptr, uint32_t x) {
-    ptr[0] = x >> 24;
-    ptr[1] = (x >> 16) & 0xff;
-    ptr[2] = (x >> 8) & 0xff;
-    ptr[3] = x & 0xff;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::signalCompletion() {
-    mSampleToChunkIndex = 0;
-
-    mSampleToChunkRemaining =
-        (mSampleToChunk == NULL)
-            ? 0
-            : U32_AT(mSampleToChunk->data() + 8 + 12 * mSampleToChunkIndex + 4);
-
-    updateSampleInfo();
-
-    return OK;
-}
-
-bool FragmentedMP4Parser::StaticTrackFragment::complete() const {
-    return true;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseSampleSizes(
-        FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 12 > size) {
-        return ERROR_MALFORMED;
-    }
-
-    if (parser->readU32(offset) != 0) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t sampleSize = parser->readU32(offset + 4);
-    uint32_t sampleCount = parser->readU32(offset + 8);
-
-    if (sampleSize == 0 && offset + 12 + sampleCount * 4 != size) {
-        return ERROR_MALFORMED;
-    }
-
-    parser->copyBuffer(&mSampleSizes, offset, size);
-
-    mSampleCount = sampleCount;
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseCompactSampleSizes(
-        FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 12 > size) {
-        return ERROR_MALFORMED;
-    }
-
-    if (parser->readU32(offset) != 0) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t fieldSize = parser->readU32(offset + 4);
-
-    if (fieldSize != 4 && fieldSize != 8 && fieldSize != 16) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t sampleCount = parser->readU32(offset + 8);
-
-    if (offset + 12 + (sampleCount * fieldSize + 4) / 8 != size) {
-        return ERROR_MALFORMED;
-    }
-
-    parser->copyBuffer(&mCompactSampleSizes, offset, size);
-
-    mSampleCount = sampleCount;
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseSampleToChunk(
-        FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 8 > size) {
-        return ERROR_MALFORMED;
-    }
-
-    if (parser->readU32(offset) != 0) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t entryCount = parser->readU32(offset + 4);
-
-    if (entryCount == 0) {
-        return OK;
-    }
-
-    if (offset + 8 + entryCount * 12 != size) {
-        return ERROR_MALFORMED;
-    }
-
-    parser->copyBuffer(&mSampleToChunk, offset, size);
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseChunkOffsets(
-        FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 8 > size) {
-        return ERROR_MALFORMED;
-    }
-
-    if (parser->readU32(offset) != 0) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t entryCount = parser->readU32(offset + 4);
-
-    if (offset + 8 + entryCount * 4 != size) {
-        return ERROR_MALFORMED;
-    }
-
-    parser->copyBuffer(&mChunkOffsets, offset, size);
-
-    return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseChunkOffsets64(
-        FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
-    if (offset + 8 > size) {
-        return ERROR_MALFORMED;
-    }
-
-    if (parser->readU32(offset) != 0) {
-        return ERROR_MALFORMED;
-    }
-
-    uint32_t entryCount = parser->readU32(offset + 4);
-
-    if (offset + 8 + entryCount * 8 != size) {
-        return ERROR_MALFORMED;
-    }
-
-    parser->copyBuffer(&mChunkOffsets64, offset, size);
-
-    return OK;
-}
-
-}  // namespace android
-
diff --git a/media/libstagefright/mp4/TrackFragment.h b/media/libstagefright/mp4/TrackFragment.h
deleted file mode 100644
index e1ad46e..0000000
--- a/media/libstagefright/mp4/TrackFragment.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TRACK_FRAGMENT_H_
-
-#define TRACK_FRAGMENT_H_
-
-#include "include/FragmentedMP4Parser.h"
-
-namespace android {
-
-struct FragmentedMP4Parser::TrackFragment : public RefBase {
-    TrackFragment() {}
-
-    virtual status_t getSample(SampleInfo *info) = 0;
-    virtual void advance() = 0;
-
-    virtual status_t signalCompletion() = 0;
-    virtual bool complete() const = 0;
-
-protected:
-    virtual ~TrackFragment() {}
-
-private:
-    DISALLOW_EVIL_CONSTRUCTORS(TrackFragment);
-};
-
-struct FragmentedMP4Parser::DynamicTrackFragment : public FragmentedMP4Parser::TrackFragment {
-    DynamicTrackFragment();
-
-    virtual status_t getSample(SampleInfo *info);
-    virtual void advance();
-
-    void addSample(
-            off64_t dataOffset, size_t sampleSize,
-            uint32_t presentationTime,
-            size_t sampleDescIndex,
-            uint32_t flags);
-
-    // No more samples will be added to this fragment.
-    virtual status_t signalCompletion();
-
-    virtual bool complete() const;
-
-protected:
-    virtual ~DynamicTrackFragment();
-
-private:
-    bool mComplete;
-    size_t mSampleIndex;
-    Vector<SampleInfo> mSamples;
-
-    DISALLOW_EVIL_CONSTRUCTORS(DynamicTrackFragment);
-};
-
-struct FragmentedMP4Parser::StaticTrackFragment : public FragmentedMP4Parser::TrackFragment {
-    StaticTrackFragment();
-
-    virtual status_t getSample(SampleInfo *info);
-    virtual void advance();
-
-    virtual status_t signalCompletion();
-    virtual bool complete() const;
-
-    status_t parseSampleSizes(
-            FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseCompactSampleSizes(
-            FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseSampleToChunk(
-            FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseChunkOffsets(
-            FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
-    status_t parseChunkOffsets64(
-            FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
-protected:
-    virtual ~StaticTrackFragment();
-
-private:
-    size_t mSampleIndex;
-    size_t mSampleCount;
-    uint32_t mChunkIndex;
-
-    SampleInfo mSampleInfo;
-
-    sp<ABuffer> mSampleSizes;
-    sp<ABuffer> mCompactSampleSizes;
-
-    sp<ABuffer> mSampleToChunk;
-    ssize_t mSampleToChunkIndex;
-    size_t mSampleToChunkRemaining;
-
-    sp<ABuffer> mChunkOffsets;
-    sp<ABuffer> mChunkOffsets64;
-    uint32_t mPrevChunkIndex;
-    uint64_t mNextSampleOffset;
-
-    void updateSampleInfo();
-
-    DISALLOW_EVIL_CONSTRUCTORS(StaticTrackFragment);
-};
-
-}  // namespace android
-
-#endif  // TRACK_FRAGMENT_H_
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index c1a7a9d..c17a0b7 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -13,6 +13,8 @@
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE:= libstagefright_mpeg2ts
 
 ifeq ($(TARGET_ARCH),arm)
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 6dfaa94..021b640 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -26,6 +26,8 @@
 #include <media/stagefright/MetaData.h>
 #include <utils/Vector.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 const int64_t kNearEOSMarkUs = 2000000ll; // 2 secs
@@ -186,7 +188,7 @@
     int64_t lastQueuedTimeUs;
     CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
     mLastQueuedTimeUs = lastQueuedTimeUs;
-    ALOGV("queueAccessUnit timeUs=%lld us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
+    ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
 
     Mutex::Autolock autoLock(mLock);
     mBuffers.push_back(buffer);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index e9252cc..f7abf01 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -265,7 +265,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an H.264/MPEG syncword "
-                          "at offset %d",
+                          "at offset %zd",
                           startOffset);
                 }
 
@@ -359,7 +359,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an AC3 syncword at "
-                          "offset %d",
+                          "offset %zd",
                           startOffset);
                 }
 
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 3fe9c23..16f6c58 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -29,6 +29,8 @@
 #include <media/hardware/MetadataBufferType.h>
 #include <ui/GraphicBuffer.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 static const bool EXTRA_CHECK = true;
@@ -43,16 +45,21 @@
     mNumFramesAvailable(0),
     mEndOfStream(false),
     mEndOfStreamSent(false),
-    mRepeatAfterUs(-1ll),
     mMaxTimestampGapUs(-1ll),
     mPrevOriginalTimeUs(-1ll),
     mPrevModifiedTimeUs(-1ll),
+    mSkipFramesBeforeNs(-1ll),
+    mRepeatAfterUs(-1ll),
     mRepeatLastFrameGeneration(0),
     mRepeatLastFrameTimestamp(-1ll),
     mLatestSubmittedBufferId(-1),
     mLatestSubmittedBufferFrameNum(0),
     mLatestSubmittedBufferUseCount(0),
-    mRepeatBufferDeferred(false) {
+    mRepeatBufferDeferred(false),
+    mTimePerCaptureUs(-1ll),
+    mTimePerFrameUs(-1ll),
+    mPrevCaptureUs(-1ll),
+    mPrevFrameUs(-1ll) {
 
     ALOGV("GraphicBufferSource w=%u h=%u c=%u",
             bufferWidth, bufferHeight, bufferCount);
@@ -65,13 +72,13 @@
 
     String8 name("GraphicBufferSource");
 
-    mBufferQueue = new BufferQueue();
-    mBufferQueue->setConsumerName(name);
-    mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+    mConsumer->setConsumerName(name);
+    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
+    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
             GRALLOC_USAGE_HW_TEXTURE);
 
-    mInitCheck = mBufferQueue->setMaxAcquiredBufferCount(bufferCount);
+    mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
     if (mInitCheck != NO_ERROR) {
         ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
                 bufferCount, mInitCheck);
@@ -85,7 +92,7 @@
     wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
     sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
 
-    mInitCheck = mBufferQueue->consumerConnect(proxy, false);
+    mInitCheck = mConsumer->consumerConnect(proxy, false);
     if (mInitCheck != NO_ERROR) {
         ALOGE("Error connecting to BufferQueue: %s (%d)",
                 strerror(-mInitCheck), mInitCheck);
@@ -97,8 +104,8 @@
 
 GraphicBufferSource::~GraphicBufferSource() {
     ALOGV("~GraphicBufferSource");
-    if (mBufferQueue != NULL) {
-        status_t err = mBufferQueue->consumerDisconnect();
+    if (mConsumer != NULL) {
+        status_t err = mConsumer->consumerDisconnect();
         if (err != NO_ERROR) {
             ALOGW("consumerDisconnect failed: %d", err);
         }
@@ -270,7 +277,7 @@
         if (id == mLatestSubmittedBufferId) {
             CHECK_GT(mLatestSubmittedBufferUseCount--, 0);
         } else {
-            mBufferQueue->releaseBuffer(id, codecBuffer.mFrameNumber,
+            mConsumer->releaseBuffer(id, codecBuffer.mFrameNumber,
                     EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
         }
     } else {
@@ -339,7 +346,7 @@
 
         while (mNumFramesAvailable > 0) {
             BufferQueue::BufferItem item;
-            status_t err = mBufferQueue->acquireBuffer(&item, 0);
+            status_t err = mConsumer->acquireBuffer(&item, 0);
 
             if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
                 // shouldn't happen.
@@ -352,7 +359,7 @@
 
             --mNumFramesAvailable;
 
-            mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
                     EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
         }
         return;
@@ -389,7 +396,7 @@
     ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d",
             mNumFramesAvailable);
     BufferQueue::BufferItem item;
-    status_t err = mBufferQueue->acquireBuffer(&item, 0);
+    status_t err = mConsumer->acquireBuffer(&item, 0);
     if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
         // shouldn't happen
         ALOGW("fillCodecBuffer_l: frame was not available");
@@ -416,10 +423,21 @@
         mBufferSlot[item.mBuf] = item.mGraphicBuffer;
     }
 
-    err = submitBuffer_l(item, cbi);
+    err = UNKNOWN_ERROR;
+
+    // only submit sample if start time is unspecified, or sample
+    // is queued after the specified start time
+    if (mSkipFramesBeforeNs < 0ll || item.mTimestamp >= mSkipFramesBeforeNs) {
+        // if start time is set, offset time stamp by start time
+        if (mSkipFramesBeforeNs > 0) {
+            item.mTimestamp -= mSkipFramesBeforeNs;
+        }
+        err = submitBuffer_l(item, cbi);
+    }
+
     if (err != OK) {
         ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
-        mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+        mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
                 EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
     } else {
         ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
@@ -442,7 +460,7 @@
         //
         // To be on the safe side we try to release the buffer.
         ALOGD("repeatLatestSubmittedBuffer_l: slot was NULL");
-        mBufferQueue->releaseBuffer(
+        mConsumer->releaseBuffer(
                 mLatestSubmittedBufferId,
                 mLatestSubmittedBufferFrameNum,
                 EGL_NO_DISPLAY,
@@ -496,7 +514,7 @@
 
     if (mLatestSubmittedBufferId >= 0) {
         if (mLatestSubmittedBufferUseCount == 0) {
-            mBufferQueue->releaseBuffer(
+            mConsumer->releaseBuffer(
                     mLatestSubmittedBufferId,
                     mLatestSubmittedBufferFrameNum,
                     EGL_NO_DISPLAY,
@@ -550,7 +568,30 @@
 int64_t GraphicBufferSource::getTimestamp(const BufferQueue::BufferItem &item) {
     int64_t timeUs = item.mTimestamp / 1000;
 
-    if (mMaxTimestampGapUs > 0ll) {
+    if (mTimePerCaptureUs > 0ll) {
+        // Time lapse or slow motion mode
+        if (mPrevCaptureUs < 0ll) {
+            // first capture
+            mPrevCaptureUs = timeUs;
+            mPrevFrameUs = timeUs;
+        } else {
+            // snap to nearest capture point
+            int64_t nFrames = (timeUs + mTimePerCaptureUs / 2 - mPrevCaptureUs)
+                    / mTimePerCaptureUs;
+            if (nFrames <= 0) {
+                // skip this frame as it's too close to previous capture
+                ALOGV("skipping frame, timeUs %lld", timeUs);
+                return -1;
+            }
+            mPrevCaptureUs = mPrevCaptureUs + nFrames * mTimePerCaptureUs;
+            mPrevFrameUs += mTimePerFrameUs * nFrames;
+        }
+
+        ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
+                timeUs, mPrevCaptureUs, mPrevFrameUs);
+
+        return mPrevFrameUs;
+    } else if (mMaxTimestampGapUs > 0ll) {
         /* Cap timestamp gap between adjacent frames to specified max
          *
          * In the scenario of cast mirroring, encoding could be suspended for
@@ -696,15 +737,15 @@
         }
 
         BufferQueue::BufferItem item;
-        status_t err = mBufferQueue->acquireBuffer(&item, 0);
+        status_t err = mConsumer->acquireBuffer(&item, 0);
         if (err == OK) {
             // If this is the first time we're seeing this buffer, add it to our
             // slot table.
             if (item.mGraphicBuffer != NULL) {
-                ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mBuf);
+                ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mBuf);
                 mBufferSlot[item.mBuf] = item.mGraphicBuffer;
             }
-            mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
                     EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
         }
         return;
@@ -724,13 +765,13 @@
 void GraphicBufferSource::onBuffersReleased() {
     Mutex::Autolock lock(mMutex);
 
-    uint32_t slotMask;
-    if (mBufferQueue->getReleasedBuffers(&slotMask) != NO_ERROR) {
+    uint64_t slotMask;
+    if (mConsumer->getReleasedBuffers(&slotMask) != NO_ERROR) {
         ALOGW("onBuffersReleased: unable to get released buffer set");
-        slotMask = 0xffffffff;
+        slotMask = 0xffffffffffffffffULL;
     }
 
-    ALOGV("onBuffersReleased: 0x%08x", slotMask);
+    ALOGV("onBuffersReleased: 0x%016" PRIx64, slotMask);
 
     for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
         if ((slotMask & 0x01) != 0) {
@@ -740,6 +781,11 @@
     }
 }
 
+// BufferQueue::ConsumerListener callback
+void GraphicBufferSource::onSidebandStreamChanged() {
+    ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
+}
+
 status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
         int64_t repeatAfterUs) {
     Mutex::Autolock autoLock(mMutex);
@@ -764,6 +810,27 @@
 
     return OK;
 }
+
+void GraphicBufferSource::setSkipFramesBeforeUs(int64_t skipFramesBeforeUs) {
+    Mutex::Autolock autoLock(mMutex);
+
+    mSkipFramesBeforeNs =
+            (skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
+}
+
+status_t GraphicBufferSource::setTimeLapseUs(int64_t* data) {
+    Mutex::Autolock autoLock(mMutex);
+
+    if (mExecuting || data[0] <= 0ll || data[1] <= 0ll) {
+        return INVALID_OPERATION;
+    }
+
+    mTimePerFrameUs = data[0];
+    mTimePerCaptureUs = data[1];
+
+    return OK;
+}
+
 void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatRepeatLastFrame:
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 3b0e454..a70cc1a 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -61,7 +61,7 @@
     // Returns the handle to the producer side of the BufferQueue.  Buffers
     // queued on this will be received by GraphicBufferSource.
     sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
-        return mBufferQueue;
+        return mProducer;
     }
 
     // This is called when OMX transitions to OMX_StateExecuting, which means
@@ -118,6 +118,17 @@
     // of suspension on input.
     status_t setMaxTimestampGapUs(int64_t maxGapUs);
 
+    // Sets the time lapse (or slow motion) parameters.
+    // data[0] is the time (us) between two frames for playback
+    // data[1] is the time (us) between two frames for capture
+    // When set, the sample's timestamp will be modified to playback framerate,
+    // and capture timestamp will be modified to capture rate.
+    status_t setTimeLapseUs(int64_t* data);
+
+    // Sets the start time us (in system time), samples before which should
+    // be dropped and not submitted to encoder
+    void setSkipFramesBeforeUs(int64_t startTimeUs);
+
 protected:
     // BufferQueue::ConsumerListener interface, called when a new frame of
     // data is available.  If we're executing and a codec buffer is
@@ -132,6 +143,11 @@
     // set of mBufferSlot entries.
     virtual void onBuffersReleased();
 
+    // BufferQueue::ConsumerListener interface, called when the client has
+    // changed the sideband stream. GraphicBufferSource doesn't handle sideband
+    // streams so this is a no-op (and should never be called).
+    virtual void onSidebandStreamChanged();
+
 private:
     // Keep track of codec input buffers.  They may either be available
     // (mGraphicBuffer == NULL) or in use by the codec.
@@ -194,8 +210,11 @@
 
     bool mSuspended;
 
-    // We consume graphic buffers from this.
-    sp<BufferQueue> mBufferQueue;
+    // Our BufferQueue interfaces. mProducer is passed to the producer through
+    // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
+    // the buffers queued by the producer.
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
 
     // Number of frames pending in BufferQueue that haven't yet been
     // forwarded to the codec.
@@ -223,16 +242,17 @@
     enum {
         kRepeatLastFrameCount = 10,
     };
-    int64_t mRepeatAfterUs;
-    int64_t mMaxTimestampGapUs;
 
     KeyedVector<int64_t, int64_t> mOriginalTimeUs;
+    int64_t mMaxTimestampGapUs;
     int64_t mPrevOriginalTimeUs;
     int64_t mPrevModifiedTimeUs;
+    int64_t mSkipFramesBeforeNs;
 
     sp<ALooper> mLooper;
     sp<AHandlerReflector<GraphicBufferSource> > mReflector;
 
+    int64_t mRepeatAfterUs;
     int32_t mRepeatLastFrameGeneration;
     int64_t mRepeatLastFrameTimestamp;
     int32_t mRepeatLastFrameCount;
@@ -245,6 +265,12 @@
     // no codec buffer was available at the time.
     bool mRepeatBufferDeferred;
 
+    // Time lapse / slow motion configuration
+    int64_t mTimePerCaptureUs;
+    int64_t mTimePerFrameUs;
+    int64_t mPrevCaptureUs;
+    int64_t mPrevFrameUs;
+
     void onMessageReceived(const sp<AMessage> &msg);
 
     DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource);
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 8391290..0fb38fa 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -850,6 +850,8 @@
         case IOMX::INTERNAL_OPTION_SUSPEND:
         case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
         case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP:
+        case IOMX::INTERNAL_OPTION_START_TIME:
+        case IOMX::INTERNAL_OPTION_TIME_LAPSE:
         {
             const sp<GraphicBufferSource> &bufferSource =
                 getGraphicBufferSource();
@@ -874,7 +876,8 @@
                 int64_t delayUs = *(int64_t *)data;
 
                 return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
-            } else {
+            } else if (type ==
+                    IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP){
                 if (size != sizeof(int64_t)) {
                     return INVALID_OPERATION;
                 }
@@ -882,6 +885,20 @@
                 int64_t maxGapUs = *(int64_t *)data;
 
                 return bufferSource->setMaxTimestampGapUs(maxGapUs);
+            } else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
+                if (size != sizeof(int64_t)) {
+                    return INVALID_OPERATION;
+                }
+
+                int64_t skipFramesBeforeUs = *(int64_t *)data;
+
+                bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
+            } else { // IOMX::INTERNAL_OPTION_TIME_LAPSE
+                if (size != sizeof(int64_t) * 2) {
+                    return INVALID_OPERATION;
+                }
+
+                bufferSource->setTimeLapseUs((int64_t *)data);
             }
 
             return OK;
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index d49e50b..65f5404 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -50,6 +50,7 @@
     { "OMX.google.mpeg4.encoder", "mpeg4enc", "video_encoder.mpeg4" },
     { "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" },
     { "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" },
+    { "OMX.google.opus.decoder", "opusdec", "audio_decoder.opus" },
     { "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
     { "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
     { "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index e368134..447b29e 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -11,6 +11,8 @@
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_MODULE := omx_tests
 
 LOCAL_MODULE_TAGS := tests
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 44e4f9d..f4dfd6b 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -26,6 +26,7 @@
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryDealer.h>
+#include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
@@ -242,7 +243,8 @@
 };
 
 static sp<MediaExtractor> CreateExtractorFromURI(const char *uri) {
-    sp<DataSource> source = DataSource::CreateFromURI(uri);
+    sp<DataSource> source =
+        DataSource::CreateFromURI(NULL /* httpService */, uri);
 
     if (source == NULL) {
         return NULL;
@@ -461,6 +463,7 @@
         { "audio_decoder.aac", "audio/mp4a-latm" },
         { "audio_decoder.mp3", "audio/mpeg" },
         { "audio_decoder.vorbis", "audio/vorbis" },
+        { "audio_decoder.opus", "audio/opus" },
         { "audio_decoder.g711alaw", MEDIA_MIMETYPE_AUDIO_G711_ALAW },
         { "audio_decoder.g711mlaw", MEDIA_MIMETYPE_AUDIO_G711_MLAW },
     };
@@ -493,6 +496,7 @@
         { "audio/mpeg",
           "file:///sdcard/media_api/music/MP3_48KHz_128kbps_s_1_17_CBR.mp3" },
         { "audio/vorbis", NULL },
+        { "audio/opus", NULL },
         { "video/x-vnd.on2.vp8",
           "file:///sdcard/media_api/video/big-buck-bunny_trailer.webm" },
         { MEDIA_MIMETYPE_AUDIO_G711_ALAW, "file:///sdcard/M1F1-Alaw-AFsp.wav" },
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 462c384..09f52bc 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -23,7 +23,7 @@
 #include "ARawAudioAssembler.h"
 #include "ASessionDescription.h"
 
-#include "avc_utils.h"
+#include "include/avc_utils.h"
 
 #include <ctype.h>
 
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 492bd4a..f25539c 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -33,7 +33,7 @@
 #include <openssl/md5.h>
 #include <sys/socket.h>
 
-#include "HTTPBase.h"
+#include "include/HTTPBase.h"
 
 namespace android {
 
@@ -239,7 +239,7 @@
         // right here, since we currently have no way of asking the user
         // for this information.
 
-        ALOGE("Malformed rtsp url <URL suppressed>");
+        ALOGE("Malformed rtsp url %s", uriDebugString(url).c_str());
 
         reply->setInt32("result", ERROR_MALFORMED);
         reply->post();
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index e77c69c..39eedc0 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -20,7 +20,7 @@
         SDPLoader.cpp               \
 
 LOCAL_C_INCLUDES:= \
-	$(TOP)/frameworks/av/media/libstagefright/include \
+	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax \
 	$(TOP)/external/openssl/include
 
@@ -30,6 +30,8 @@
     LOCAL_CFLAGS += -Wno-psabi
 endif
 
+LOCAL_CFLAGS += -Werror
+
 include $(BUILD_STATIC_LIBRARY)
 
 ################################################################################
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index e7580c2..f3dfc59 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -19,7 +19,11 @@
 #define MY_HANDLER_H_
 
 //#define LOG_NDEBUG 0
+
+#ifndef LOG_TAG
 #define LOG_TAG "MyHandler"
+#endif
+
 #include <utils/Log.h>
 
 #include "APacketSource.h"
@@ -42,6 +46,12 @@
 
 #include "HTTPBase.h"
 
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
 // If no access units are received within 5 secs, assume that the rtp
 // stream has ended and signal end of stream.
 static int64_t kAccessUnitTimeoutUs = 10000000ll;
@@ -178,7 +188,7 @@
         mConn->connect(mOriginalSessionURL.c_str(), reply);
     }
 
-    AString getControlURL(sp<ASessionDescription> desc) {
+    AString getControlURL() {
         AString sessionLevelControlURL;
         if (mSessionDesc->findAttribute(
                 0,
@@ -556,7 +566,7 @@
                                 mBaseURL = tmp;
                             }
 
-                            mControlURL = getControlURL(mSessionDesc);
+                            mControlURL = getControlURL();
 
                             if (mSessionDesc->countTracks() < 2) {
                                 // There's no actual tracks in this session.
@@ -602,7 +612,7 @@
 
                         mSeekable = !isLiveStream(mSessionDesc);
 
-                        mControlURL = getControlURL(mSessionDesc);
+                        mControlURL = getControlURL();
 
                         if (mSessionDesc->countTracks() < 2) {
                             // There's no actual tracks in this session.
@@ -1816,6 +1826,8 @@
     bool addMediaTimestamp(
             int32_t trackIndex, const TrackInfo *track,
             const sp<ABuffer> &accessUnit) {
+        UNUSED_UNLESS_VERBOSE(trackIndex);
+
         uint32_t rtpTime;
         CHECK(accessUnit->meta()->findInt32(
                     "rtp-time", (int32_t *)&rtpTime));
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index 89ff17d..424badf 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -18,34 +18,30 @@
 #define LOG_TAG "SDPLoader"
 #include <utils/Log.h>
 
-#include "SDPLoader.h"
+#include "include/SDPLoader.h"
 
 #include "ASessionDescription.h"
-#include "HTTPBase.h"
 
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/MediaHTTP.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/Utils.h>
 
 #define DEFAULT_SDP_SIZE 100000
 
 namespace android {
 
-SDPLoader::SDPLoader(const sp<AMessage> &notify, uint32_t flags, bool uidValid, uid_t uid)
+SDPLoader::SDPLoader(
+        const sp<AMessage> &notify,
+        uint32_t flags,
+        const sp<IMediaHTTPService> &httpService)
     : mNotify(notify),
       mFlags(flags),
-      mUIDValid(uidValid),
-      mUID(uid),
       mNetLooper(new ALooper),
       mCancelled(false),
-      mHTTPDataSource(
-              HTTPBase::Create(
-                  (mFlags & kFlagIncognito)
-                    ? HTTPBase::kFlagIncognito
-                    : 0)) {
-    if (mUIDValid) {
-        mHTTPDataSource->setUID(mUID);
-    }
-
+      mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())) {
     mNetLooper->setName("sdp net");
     mNetLooper->start(false /* runOnCallingThread */,
                       false /* canCallJava */,
@@ -94,11 +90,7 @@
     KeyedVector<String8, String8> *headers = NULL;
     msg->findPointer("headers", (void **)&headers);
 
-    if (!(mFlags & kFlagIncognito)) {
-        ALOGV("onLoad '%s'", url.c_str());
-    } else {
-        ALOGI("onLoad <URL suppressed>");
-    }
+    ALOGV("onLoad %s", uriDebugString(url, mFlags & kFlagIncognito).c_str());
 
     if (!mCancelled) {
         err = mHTTPDataSource->connect(url.c_str(), headers);
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index 49ffcd6..fd889f9 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -35,7 +35,6 @@
 #include <gui/SurfaceComposerClient.h>
 
 #include <binder/ProcessState.h>
-#include <ui/FramebufferNativeWindow.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
@@ -110,7 +109,7 @@
         } else {
             ALOGV("No actual display. Choosing EGLSurface based on SurfaceMediaSource");
             sp<IGraphicBufferProducer> sms = (new SurfaceMediaSource(
-                    getSurfaceWidth(), getSurfaceHeight()))->getBufferQueue();
+                    getSurfaceWidth(), getSurfaceHeight()))->getProducer();
             sp<Surface> stc = new Surface(sms);
             sp<ANativeWindow> window = stc;
 
@@ -361,9 +360,7 @@
     virtual void SetUp() {
         android::ProcessState::self()->startThreadPool();
         mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-
-        // Manual cast is required to avoid constructor ambiguity
-        mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+        mSTC = new Surface(mSMS->getProducer());
         mANW = mSTC;
     }
 
@@ -398,7 +395,7 @@
         ALOGV("SMS-GLTest::SetUp()");
         android::ProcessState::self()->startThreadPool();
         mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-        mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+        mSTC = new Surface(mSMS->getProducer());
         mANW = mSTC;
 
         // Doing the setup related to the GL Side
@@ -527,7 +524,8 @@
 }
 
 // Dequeuing and queuing the buffer without really filling it in.
-void SurfaceMediaSourceTest::oneBufferPassNoFill(int width, int height ) {
+void SurfaceMediaSourceTest::oneBufferPassNoFill(
+        int /* width */, int /* height  */) {
     ANativeWindowBuffer* anb;
     ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(), &anb));
     ASSERT_TRUE(anb != NULL);
@@ -746,9 +744,8 @@
     CHECK(fd >= 0);
 
     sp<MediaRecorder> mr = SurfaceMediaSourceGLTest::setUpMediaRecorder(fd,
-            VIDEO_SOURCE_GRALLOC_BUFFER,
-            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth,
-            mYuvTexHeight, 30);
+            VIDEO_SOURCE_SURFACE, OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264,
+            mYuvTexWidth, mYuvTexHeight, 30);
     // get the reference to the surfacemediasource living in
     // mediaserver that is created by stagefrightrecorder
     sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer();
@@ -783,7 +780,7 @@
     ALOGV("Verify creating a surface w/ right config + dummy writer*********");
 
     mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-    mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+    mSTC = new Surface(mSMS->getProducer());
     mANW = mSTC;
 
     DummyRecorder writer(mSMS);
@@ -880,7 +877,7 @@
     }
     CHECK(fd >= 0);
 
-    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_SURFACE,
             OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
 
     // get the reference to the surfacemediasource living in
@@ -923,7 +920,7 @@
     }
     CHECK(fd >= 0);
 
-    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_SURFACE,
             OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
 
     // get the reference to the surfacemediasource living in
diff --git a/media/libstagefright/timedtext/TimedTextDriver.cpp b/media/libstagefright/timedtext/TimedTextDriver.cpp
index 12fd7f4..71aa21e 100644
--- a/media/libstagefright/timedtext/TimedTextDriver.cpp
+++ b/media/libstagefright/timedtext/TimedTextDriver.cpp
@@ -20,6 +20,7 @@
 
 #include <binder/IPCThreadState.h>
 
+#include <media/IMediaHTTPService.h>
 #include <media/mediaplayer.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/DataSource.h>
@@ -40,9 +41,11 @@
 namespace android {
 
 TimedTextDriver::TimedTextDriver(
-        const wp<MediaPlayerBase> &listener)
+        const wp<MediaPlayerBase> &listener,
+        const sp<IMediaHTTPService> &httpService)
     : mLooper(new ALooper),
       mListener(listener),
+      mHTTPService(httpService),
       mState(UNINITIALIZED),
       mCurrentTrackIndex(UINT_MAX) {
     mLooper->setName("TimedTextDriver");
@@ -207,7 +210,7 @@
     }
 
     sp<DataSource> dataSource =
-            DataSource::CreateFromURI(uri);
+            DataSource::CreateFromURI(mHTTPService, uri);
     return createOutOfBandTextSource(trackIndex, mimeType, dataSource);
 }
 
diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk
index a5e7ba2..9a9fde2 100644
--- a/media/libstagefright/timedtext/test/Android.mk
+++ b/media/libstagefright/timedtext/test/Android.mk
@@ -2,7 +2,6 @@
 
 # ================================================================
 # Unit tests for libstagefright_timedtext
-# See also /development/testrunner/test_defs.xml
 # ================================================================
 
 # ================================================================
@@ -18,10 +17,13 @@
 
 LOCAL_C_INCLUDES := \
     $(TOP)/external/expat/lib \
-    $(TOP)/frameworks/base/media/libstagefright/timedtext
+    $(TOP)/frameworks/av/media/libstagefright/timedtext
 
 LOCAL_SHARED_LIBRARIES := \
+    libbinder \
     libexpat \
-    libstagefright
+    libstagefright \
+    libstagefright_foundation \
+    libutils
 
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/webm/Android.mk b/media/libstagefright/webm/Android.mk
new file mode 100644
index 0000000..7081463
--- /dev/null
+++ b/media/libstagefright/webm/Android.mk
@@ -0,0 +1,23 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_CPPFLAGS += -D__STDINT_LIMITS \
+                  -Werror
+
+LOCAL_SRC_FILES:= EbmlUtil.cpp        \
+                  WebmElement.cpp     \
+                  WebmFrame.cpp       \
+                  WebmFrameThread.cpp \
+                  WebmWriter.cpp
+
+
+LOCAL_C_INCLUDES += $(TOP)/frameworks/av/include
+
+LOCAL_SHARED_LIBRARIES += libstagefright_foundation \
+                          libstagefright \
+                          libutils \
+                          liblog
+
+LOCAL_MODULE:= libstagefright_webm
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/webm/EbmlUtil.cpp b/media/libstagefright/webm/EbmlUtil.cpp
new file mode 100644
index 0000000..449fec6
--- /dev/null
+++ b/media/libstagefright/webm/EbmlUtil.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+namespace {
+
+// Table for Seal's algorithm for Number of Trailing Zeros. Hacker's Delight
+// online, Figure 5-18 (http://www.hackersdelight.org/revisions.pdf)
+// The entries whose value is -1 are never referenced.
+int NTZ_TABLE[] = {
+    32,  0,  1, 12,  2,  6, -1, 13,   3, -1,  7, -1, -1, -1, -1, 14,
+    10,  4, -1, -1,  8, -1, -1, 25,  -1, -1, -1, -1, -1, 21, 27, 15,
+    31, 11,  5, -1, -1, -1, -1, -1,   9, -1, -1, 24, -1, -1, 20, 26,
+    30, -1, -1, -1, -1, 23, -1, 19,  29, -1, 22, 18, 28, 17, 16, -1
+};
+
+int numberOfTrailingZeros32(int32_t i) {
+    uint32_t u = (i & -i) * 0x0450FBAF;
+    return NTZ_TABLE[(u) >> 26];
+}
+
+uint64_t highestOneBit(uint64_t n) {
+    n |= (n >> 1);
+    n |= (n >> 2);
+    n |= (n >> 4);
+    n |= (n >> 8);
+    n |= (n >> 16);
+    n |= (n >> 32);
+    return n - (n >> 1);
+}
+
+uint64_t _powerOf2(uint64_t u) {
+    uint64_t powerOf2 = highestOneBit(u);
+    return powerOf2 ? powerOf2 : 1;
+}
+
+// Based on Long.numberOfTrailingZeros in Long.java
+int numberOfTrailingZeros(uint64_t u) {
+    int32_t low = u;
+    return low !=0 ? numberOfTrailingZeros32(low)
+                   : 32 + numberOfTrailingZeros32((int32_t) (u >> 32));
+}
+}
+
+namespace webm {
+
+// Encode the id and/or size of an EBML element bytes by setting a leading length descriptor bit:
+//
+//   1xxxxxxx                                                                - 1-byte values
+//   01xxxxxx xxxxxxxx                                                       -
+//   001xxxxx xxxxxxxx xxxxxxxx                                              -
+//   0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx                                     - ...
+//   00001xxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx                            -
+//   000001xx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx                   -
+//   0000001x xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx          -
+//   00000001 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - 8-byte values
+//
+// This function uses the least the number of bytes possible.
+uint64_t encodeUnsigned(uint64_t u) {
+    uint64_t powerOf2 = _powerOf2(u);
+    if (u + 1 == powerOf2 << 1)
+        powerOf2 <<= 1;
+    int shiftWidth = (7 + numberOfTrailingZeros(powerOf2)) / 7 * 7;
+    long lengthDescriptor = 1 << shiftWidth;
+    return lengthDescriptor | u;
+}
+
+// Like above but pads the input value with leading zeros up to the specified width. The length
+// descriptor is calculated based on width.
+uint64_t encodeUnsigned(uint64_t u, int width) {
+    int shiftWidth = 7 * width;
+    uint64_t lengthDescriptor = 1;
+    lengthDescriptor <<= shiftWidth;
+    return lengthDescriptor | u;
+}
+
+// Calculate the length of an EBML coded id or size from its length descriptor.
+int sizeOf(uint64_t u) {
+    uint64_t powerOf2 = _powerOf2(u);
+    int unsignedLength = numberOfTrailingZeros(powerOf2) / 8 + 1;
+    return unsignedLength;
+}
+
+// Serialize an EBML coded id or size in big-endian order.
+int serializeCodedUnsigned(uint64_t u, uint8_t* bary) {
+    int unsignedLength = sizeOf(u);
+    for (int i = unsignedLength - 1; i >= 0; i--) {
+        bary[i] = u & 0xff;
+        u >>= 8;
+    }
+    return unsignedLength;
+}
+
+}
diff --git a/media/libstagefright/webm/EbmlUtil.h b/media/libstagefright/webm/EbmlUtil.h
new file mode 100644
index 0000000..eb9c37c
--- /dev/null
+++ b/media/libstagefright/webm/EbmlUtil.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EBMLUTIL_H_
+#define EBMLUTIL_H_
+
+#include <stdint.h>
+
+namespace webm {
+
+// Encode the id and/or size of an EBML element bytes by setting a leading length descriptor bit:
+//
+//   1xxxxxxx                                                                - 1-byte values
+//   01xxxxxx xxxxxxxx                                                       -
+//   001xxxxx xxxxxxxx xxxxxxxx                                              -
+//   0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx                                     - ...
+//   00001xxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx                            -
+//   000001xx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx                   -
+//   0000001x xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx          -
+//   00000001 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - 8-byte values
+//
+// This function uses the least the number of bytes possible.
+uint64_t encodeUnsigned(uint64_t u);
+
+// Like above but pads the input value with leading zeros up to the specified width. The length
+// descriptor is calculated based on width.
+uint64_t encodeUnsigned(uint64_t u, int width);
+
+// Serialize an EBML coded id or size in big-endian order.
+int serializeCodedUnsigned(uint64_t u, uint8_t* bary);
+
+// Calculate the length of an EBML coded id or size from its length descriptor.
+int sizeOf(uint64_t u);
+
+}
+
+#endif /* EBMLUTIL_H_ */
diff --git a/media/libstagefright/webm/LinkedBlockingQueue.h b/media/libstagefright/webm/LinkedBlockingQueue.h
new file mode 100644
index 0000000..0b6a9a1
--- /dev/null
+++ b/media/libstagefright/webm/LinkedBlockingQueue.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LINKEDBLOCKINGQUEUE_H_
+#define LINKEDBLOCKINGQUEUE_H_
+
+#include <utils/List.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+namespace android {
+
+template<typename T>
+class LinkedBlockingQueue {
+    List<T> mList;
+    Mutex mLock;
+    Condition mContentAvailableCondition;
+
+    T front(bool remove) {
+        Mutex::Autolock autolock(mLock);
+        while (mList.empty()) {
+            mContentAvailableCondition.wait(mLock);
+        }
+        T e = *(mList.begin());
+        if (remove) {
+            mList.erase(mList.begin());
+        }
+        return e;
+    }
+
+    DISALLOW_EVIL_CONSTRUCTORS(LinkedBlockingQueue);
+
+public:
+    LinkedBlockingQueue() {
+    }
+
+    ~LinkedBlockingQueue() {
+    }
+
+    bool empty() {
+        Mutex::Autolock autolock(mLock);
+        return mList.empty();
+    }
+
+    void clear() {
+        Mutex::Autolock autolock(mLock);
+        mList.clear();
+    }
+
+    T peek() {
+        return front(false);
+    }
+
+    T take() {
+        return front(true);
+    }
+
+    void push(T e) {
+        Mutex::Autolock autolock(mLock);
+        mList.push_back(e);
+        mContentAvailableCondition.signal();
+    }
+};
+
+} /* namespace android */
+#endif /* LINKEDBLOCKINGQUEUE_H_ */
diff --git a/media/libstagefright/webm/WebmConstants.h b/media/libstagefright/webm/WebmConstants.h
new file mode 100644
index 0000000..c53f458
--- /dev/null
+++ b/media/libstagefright/webm/WebmConstants.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMCONSTANTS_H_
+#define WEBMCONSTANTS_H_
+
+#include <stdint.h>
+
+namespace webm {
+
+const int kMinEbmlVoidSize = 2;
+const int64_t kMaxMetaSeekSize = 64;
+const int64_t kMkvUnknownLength = 0x01ffffffffffffffl;
+
+// EBML element id's from http://matroska.org/technical/specs/index.html
+enum Mkv {
+    kMkvEbml = 0x1A45DFA3,
+    kMkvEbmlVersion = 0x4286,
+    kMkvEbmlReadVersion = 0x42F7,
+    kMkvEbmlMaxIdlength = 0x42F2,
+    kMkvEbmlMaxSizeLength = 0x42F3,
+    kMkvDocType = 0x4282,
+    kMkvDocTypeVersion = 0x4287,
+    kMkvDocTypeReadVersion = 0x4285,
+    kMkvVoid = 0xEC,
+    kMkvSignatureSlot = 0x1B538667,
+    kMkvSignatureAlgo = 0x7E8A,
+    kMkvSignatureHash = 0x7E9A,
+    kMkvSignaturePublicKey = 0x7EA5,
+    kMkvSignature = 0x7EB5,
+    kMkvSignatureElements = 0x7E5B,
+    kMkvSignatureElementList = 0x7E7B,
+    kMkvSignedElement = 0x6532,
+    kMkvSegment = 0x18538067,
+    kMkvSeekHead = 0x114D9B74,
+    kMkvSeek = 0x4DBB,
+    kMkvSeekId = 0x53AB,
+    kMkvSeekPosition = 0x53AC,
+    kMkvInfo = 0x1549A966,
+    kMkvTimecodeScale = 0x2AD7B1,
+    kMkvSegmentDuration = 0x4489,
+    kMkvDateUtc = 0x4461,
+    kMkvMuxingApp = 0x4D80,
+    kMkvWritingApp = 0x5741,
+    kMkvCluster = 0x1F43B675,
+    kMkvTimecode = 0xE7,
+    kMkvPrevSize = 0xAB,
+    kMkvBlockGroup = 0xA0,
+    kMkvBlock = 0xA1,
+    kMkvBlockAdditions = 0x75A1,
+    kMkvBlockMore = 0xA6,
+    kMkvBlockAddId = 0xEE,
+    kMkvBlockAdditional = 0xA5,
+    kMkvBlockDuration = 0x9B,
+    kMkvReferenceBlock = 0xFB,
+    kMkvLaceNumber = 0xCC,
+    kMkvSimpleBlock = 0xA3,
+    kMkvTracks = 0x1654AE6B,
+    kMkvTrackEntry = 0xAE,
+    kMkvTrackNumber = 0xD7,
+    kMkvTrackUid = 0x73C5,
+    kMkvTrackType = 0x83,
+    kMkvFlagEnabled = 0xB9,
+    kMkvFlagDefault = 0x88,
+    kMkvFlagForced = 0x55AA,
+    kMkvFlagLacing = 0x9C,
+    kMkvDefaultDuration = 0x23E383,
+    kMkvMaxBlockAdditionId = 0x55EE,
+    kMkvName = 0x536E,
+    kMkvLanguage = 0x22B59C,
+    kMkvCodecId = 0x86,
+    kMkvCodecPrivate = 0x63A2,
+    kMkvCodecName = 0x258688,
+    kMkvVideo = 0xE0,
+    kMkvFlagInterlaced = 0x9A,
+    kMkvStereoMode = 0x53B8,
+    kMkvAlphaMode = 0x53C0,
+    kMkvPixelWidth = 0xB0,
+    kMkvPixelHeight = 0xBA,
+    kMkvPixelCropBottom = 0x54AA,
+    kMkvPixelCropTop = 0x54BB,
+    kMkvPixelCropLeft = 0x54CC,
+    kMkvPixelCropRight = 0x54DD,
+    kMkvDisplayWidth = 0x54B0,
+    kMkvDisplayHeight = 0x54BA,
+    kMkvDisplayUnit = 0x54B2,
+    kMkvAspectRatioType = 0x54B3,
+    kMkvFrameRate = 0x2383E3,
+    kMkvAudio = 0xE1,
+    kMkvSamplingFrequency = 0xB5,
+    kMkvOutputSamplingFrequency = 0x78B5,
+    kMkvChannels = 0x9F,
+    kMkvBitDepth = 0x6264,
+    kMkvCues = 0x1C53BB6B,
+    kMkvCuePoint = 0xBB,
+    kMkvCueTime = 0xB3,
+    kMkvCueTrackPositions = 0xB7,
+    kMkvCueTrack = 0xF7,
+    kMkvCueClusterPosition = 0xF1,
+    kMkvCueBlockNumber = 0x5378
+};
+
+enum TrackTypes {
+    kInvalidType = -1,
+    kVideoType = 0x1,
+    kAudioType = 0x2,
+    kComplexType = 0x3,
+    kLogoType = 0x10,
+    kSubtitleType = 0x11,
+    kButtonsType = 0x12,
+    kControlType = 0x20
+};
+
+enum TrackNum {
+    kVideoTrackNum = 0x1,
+    kAudioTrackNum = 0x2
+};
+}
+
+#endif /* WEBMCONSTANTS_H_ */
diff --git a/media/libstagefright/webm/WebmElement.cpp b/media/libstagefright/webm/WebmElement.cpp
new file mode 100644
index 0000000..a008cab
--- /dev/null
+++ b/media/libstagefright/webm/WebmElement.cpp
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "WebmElement"
+
+#include "EbmlUtil.h"
+#include "WebmElement.h"
+#include "WebmConstants.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <utils/Log.h>
+
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+using namespace android;
+using namespace webm;
+
+namespace {
+
+int64_t voidSize(int64_t totalSize) {
+    if (totalSize < 2) {
+        return -1;
+    }
+    if (totalSize < 9) {
+        return totalSize - 2;
+    }
+    return totalSize - 9;
+}
+
+uint64_t childrenSum(const List<sp<WebmElement> >& children) {
+    uint64_t total = 0;
+    for (List<sp<WebmElement> >::const_iterator it = children.begin();
+            it != children.end(); ++it) {
+        total += (*it)->totalSize();
+    }
+    return total;
+}
+
+void populateCommonTrackEntries(
+        int num,
+        uint64_t uid,
+        bool lacing,
+        const char *lang,
+        const char *codec,
+        TrackTypes type,
+        List<sp<WebmElement> > &ls) {
+    ls.push_back(new WebmUnsigned(kMkvTrackNumber, num));
+    ls.push_back(new WebmUnsigned(kMkvTrackUid, uid));
+    ls.push_back(new WebmUnsigned(kMkvFlagLacing, lacing));
+    ls.push_back(new WebmString(kMkvLanguage, lang));
+    ls.push_back(new WebmString(kMkvCodecId, codec));
+    ls.push_back(new WebmUnsigned(kMkvTrackType, type));
+}
+}
+
+namespace android {
+
+WebmElement::WebmElement(uint64_t id, uint64_t size)
+    : mId(id), mSize(size) {
+}
+
+WebmElement::~WebmElement() {
+}
+
+int WebmElement::serializePayloadSize(uint8_t *buf) {
+    return serializeCodedUnsigned(encodeUnsigned(mSize), buf);
+}
+
+uint64_t WebmElement::serializeInto(uint8_t *buf) {
+    uint8_t *cur = buf;
+    int head = serializeCodedUnsigned(mId, cur);
+    cur += head;
+    int neck = serializePayloadSize(cur);
+    cur += neck;
+    serializePayload(cur);
+    cur += mSize;
+    return cur - buf;
+}
+
+uint64_t WebmElement::totalSize() {
+    uint8_t buf[8];
+    //............... + sizeOf(encodeUnsigned(size))
+    return sizeOf(mId) + serializePayloadSize(buf) + mSize;
+}
+
+uint8_t *WebmElement::serialize(uint64_t& size) {
+    size = totalSize();
+    uint8_t *buf = new uint8_t[size];
+    serializeInto(buf);
+    return buf;
+}
+
+int WebmElement::write(int fd, uint64_t& size) {
+    uint8_t buf[8];
+    size = totalSize();
+    off64_t off = ::lseek64(fd, (size - 1), SEEK_CUR) - (size - 1);
+    ::write(fd, buf, 1); // extend file
+
+    off64_t curOff = off + size;
+    off64_t alignedOff = off & ~(::sysconf(_SC_PAGE_SIZE) - 1);
+    off64_t mapSize = curOff - alignedOff;
+    off64_t pageOff = off - alignedOff;
+    void *dst = ::mmap64(NULL, mapSize, PROT_WRITE, MAP_SHARED, fd, alignedOff);
+    if (dst == MAP_FAILED) {
+        ALOGE("mmap64 failed; errno = %d", errno);
+        ALOGE("fd %d; flags: %o", fd, ::fcntl(fd, F_GETFL, 0));
+        return errno;
+    } else {
+        serializeInto((uint8_t*) dst + pageOff);
+        ::msync(dst, mapSize, MS_SYNC);
+        return ::munmap(dst, mapSize);
+    }
+}
+
+//=================================================================================================
+
+WebmUnsigned::WebmUnsigned(uint64_t id, uint64_t value)
+    : WebmElement(id, sizeOf(value)), mValue(value) {
+}
+
+void WebmUnsigned::serializePayload(uint8_t *buf) {
+    serializeCodedUnsigned(mValue, buf);
+}
+
+//=================================================================================================
+
+WebmFloat::WebmFloat(uint64_t id, double value)
+    : WebmElement(id, sizeof(double)), mValue(value) {
+}
+
+WebmFloat::WebmFloat(uint64_t id, float value)
+    : WebmElement(id, sizeof(float)), mValue(value) {
+}
+
+void WebmFloat::serializePayload(uint8_t *buf) {
+    uint64_t data;
+    if (mSize == sizeof(float)) {
+        float f = mValue;
+        data = *reinterpret_cast<const uint32_t*>(&f);
+    } else {
+        data = *reinterpret_cast<const uint64_t*>(&mValue);
+    }
+    for (int i = mSize - 1; i >= 0; --i) {
+        buf[i] = data & 0xff;
+        data >>= 8;
+    }
+}
+
+//=================================================================================================
+
+WebmBinary::WebmBinary(uint64_t id, const sp<ABuffer> &ref)
+    : WebmElement(id, ref->size()), mRef(ref) {
+}
+
+void WebmBinary::serializePayload(uint8_t *buf) {
+    memcpy(buf, mRef->data(), mRef->size());
+}
+
+//=================================================================================================
+
+WebmString::WebmString(uint64_t id, const char *str)
+    : WebmElement(id, strlen(str)), mStr(str) {
+}
+
+void WebmString::serializePayload(uint8_t *buf) {
+    memcpy(buf, mStr, strlen(mStr));
+}
+
+//=================================================================================================
+
+WebmSimpleBlock::WebmSimpleBlock(
+        int trackNum,
+        int16_t relTimecode,
+        bool key,
+        const sp<ABuffer>& orig)
+    // ............................ trackNum*1 + timecode*2 + flags*1
+    //                                ^^^
+    // Only the least significant byte of trackNum is encoded
+    : WebmElement(kMkvSimpleBlock, orig->size() + 4),
+      mTrackNum(trackNum),
+      mRelTimecode(relTimecode),
+      mKey(key),
+      mRef(orig) {
+}
+
+void WebmSimpleBlock::serializePayload(uint8_t *buf) {
+    serializeCodedUnsigned(encodeUnsigned(mTrackNum), buf);
+    buf[1] = (mRelTimecode & 0xff00) >> 8;
+    buf[2] = mRelTimecode & 0xff;
+    buf[3] = mKey ? 0x80 : 0;
+    memcpy(buf + 4, mRef->data(), mSize - 4);
+}
+
+//=================================================================================================
+
+EbmlVoid::EbmlVoid(uint64_t totalSize)
+    : WebmElement(kMkvVoid, voidSize(totalSize)),
+      mSizeWidth(totalSize - sizeOf(kMkvVoid) - voidSize(totalSize)) {
+    CHECK_GE(voidSize(totalSize), 0);
+}
+
+int EbmlVoid::serializePayloadSize(uint8_t *buf) {
+    return serializeCodedUnsigned(encodeUnsigned(mSize, mSizeWidth), buf);
+}
+
+void EbmlVoid::serializePayload(uint8_t *buf) {
+    ::memset(buf, 0, mSize);
+    return;
+}
+
+//=================================================================================================
+
+WebmMaster::WebmMaster(uint64_t id, const List<sp<WebmElement> >& children)
+    : WebmElement(id, childrenSum(children)), mChildren(children) {
+}
+
+WebmMaster::WebmMaster(uint64_t id)
+    : WebmElement(id, 0) {
+}
+
+int WebmMaster::serializePayloadSize(uint8_t *buf) {
+    if (mSize == 0){
+        return serializeCodedUnsigned(kMkvUnknownLength, buf);
+    }
+    return WebmElement::serializePayloadSize(buf);
+}
+
+void WebmMaster::serializePayload(uint8_t *buf) {
+    uint64_t off = 0;
+    for (List<sp<WebmElement> >::const_iterator it = mChildren.begin(); it != mChildren.end();
+            ++it) {
+        sp<WebmElement> child = (*it);
+        child->serializeInto(buf + off);
+        off += child->totalSize();
+    }
+}
+
+//=================================================================================================
+
+sp<WebmElement> WebmElement::CuePointEntry(uint64_t time, int track, uint64_t off) {
+    List<sp<WebmElement> > cuePointEntryFields;
+    cuePointEntryFields.push_back(new WebmUnsigned(kMkvCueTrack, track));
+    cuePointEntryFields.push_back(new WebmUnsigned(kMkvCueClusterPosition, off));
+    WebmElement *cueTrackPositions = new WebmMaster(kMkvCueTrackPositions, cuePointEntryFields);
+
+    cuePointEntryFields.clear();
+    cuePointEntryFields.push_back(new WebmUnsigned(kMkvCueTime, time));
+    cuePointEntryFields.push_back(cueTrackPositions);
+    return new WebmMaster(kMkvCuePoint, cuePointEntryFields);
+}
+
+sp<WebmElement> WebmElement::SeekEntry(uint64_t id, uint64_t off) {
+    List<sp<WebmElement> > seekEntryFields;
+    seekEntryFields.push_back(new WebmUnsigned(kMkvSeekId, id));
+    seekEntryFields.push_back(new WebmUnsigned(kMkvSeekPosition, off));
+    return new WebmMaster(kMkvSeek, seekEntryFields);
+}
+
+sp<WebmElement> WebmElement::EbmlHeader(
+        int ver,
+        int readVer,
+        int maxIdLen,
+        int maxSizeLen,
+        int docVer,
+        int docReadVer) {
+    List<sp<WebmElement> > headerFields;
+    headerFields.push_back(new WebmUnsigned(kMkvEbmlVersion, ver));
+    headerFields.push_back(new WebmUnsigned(kMkvEbmlReadVersion, readVer));
+    headerFields.push_back(new WebmUnsigned(kMkvEbmlMaxIdlength, maxIdLen));
+    headerFields.push_back(new WebmUnsigned(kMkvEbmlMaxSizeLength, maxSizeLen));
+    headerFields.push_back(new WebmString(kMkvDocType, "webm"));
+    headerFields.push_back(new WebmUnsigned(kMkvDocTypeVersion, docVer));
+    headerFields.push_back(new WebmUnsigned(kMkvDocTypeReadVersion, docReadVer));
+    return new WebmMaster(kMkvEbml, headerFields);
+}
+
+sp<WebmElement> WebmElement::SegmentInfo(uint64_t scale, double dur) {
+    List<sp<WebmElement> > segmentInfo;
+    // place duration first; easier to patch
+    segmentInfo.push_back(new WebmFloat(kMkvSegmentDuration, dur));
+    segmentInfo.push_back(new WebmUnsigned(kMkvTimecodeScale, scale));
+    segmentInfo.push_back(new WebmString(kMkvMuxingApp, "android"));
+    segmentInfo.push_back(new WebmString(kMkvWritingApp, "android"));
+    return new WebmMaster(kMkvInfo, segmentInfo);
+}
+
+sp<WebmElement> WebmElement::AudioTrackEntry(
+        int chans,
+        double rate,
+        const sp<ABuffer> &buf,
+        int bps,
+        uint64_t uid,
+        bool lacing,
+        const char *lang) {
+    if (uid == 0) {
+        uid = kAudioTrackNum;
+    }
+
+    List<sp<WebmElement> > trackEntryFields;
+    populateCommonTrackEntries(
+            kAudioTrackNum,
+            uid,
+            lacing,
+            lang,
+            "A_VORBIS",
+            kAudioType,
+            trackEntryFields);
+
+    List<sp<WebmElement> > audioInfo;
+    audioInfo.push_back(new WebmUnsigned(kMkvChannels, chans));
+    audioInfo.push_back(new WebmFloat(kMkvSamplingFrequency, rate));
+    if (bps) {
+        WebmElement *bitDepth = new WebmUnsigned(kMkvBitDepth, bps);
+        audioInfo.push_back(bitDepth);
+    }
+
+    trackEntryFields.push_back(new WebmMaster(kMkvAudio, audioInfo));
+    trackEntryFields.push_back(new WebmBinary(kMkvCodecPrivate, buf));
+    return new WebmMaster(kMkvTrackEntry, trackEntryFields);
+}
+
+sp<WebmElement> WebmElement::VideoTrackEntry(
+        uint64_t width,
+        uint64_t height,
+        uint64_t uid,
+        bool lacing,
+        const char *lang) {
+    if (uid == 0) {
+        uid = kVideoTrackNum;
+    }
+
+    List<sp<WebmElement> > trackEntryFields;
+    populateCommonTrackEntries(
+            kVideoTrackNum,
+            uid,
+            lacing,
+            lang,
+            "V_VP8",
+            kVideoType,
+            trackEntryFields);
+
+    List<sp<WebmElement> > videoInfo;
+    videoInfo.push_back(new WebmUnsigned(kMkvPixelWidth, width));
+    videoInfo.push_back(new WebmUnsigned(kMkvPixelHeight, height));
+
+    trackEntryFields.push_back(new WebmMaster(kMkvVideo, videoInfo));
+    return new WebmMaster(kMkvTrackEntry, trackEntryFields);
+}
+} /* namespace android */
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/WebmElement.h
new file mode 100644
index 0000000..f19933e
--- /dev/null
+++ b/media/libstagefright/webm/WebmElement.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMELEMENT_H_
+#define WEBMELEMENT_H_
+
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/List.h>
+
+namespace android {
+
+struct WebmElement : public LightRefBase<WebmElement> {
+    const uint64_t mId, mSize;
+
+    WebmElement(uint64_t id, uint64_t size);
+    virtual ~WebmElement();
+
+    virtual int serializePayloadSize(uint8_t *buf);
+    virtual void serializePayload(uint8_t *buf)=0;
+    uint64_t totalSize();
+    uint64_t serializeInto(uint8_t *buf);
+    uint8_t *serialize(uint64_t& size);
+    int write(int fd, uint64_t& size);
+
+    static sp<WebmElement> EbmlHeader(
+            int ver = 1,
+            int readVer = 1,
+            int maxIdLen = 4,
+            int maxSizeLen = 8,
+            int docVer = 2,
+            int docReadVer = 2);
+
+    static sp<WebmElement> SegmentInfo(uint64_t scale = 1000000, double dur = 0);
+
+    static sp<WebmElement> AudioTrackEntry(
+            int chans,
+            double rate,
+            const sp<ABuffer> &buf,
+            int bps = 0,
+            uint64_t uid = 0,
+            bool lacing = false,
+            const char *lang = "und");
+
+    static sp<WebmElement> VideoTrackEntry(
+            uint64_t width,
+            uint64_t height,
+            uint64_t uid = 0,
+            bool lacing = false,
+            const char *lang = "und");
+
+    static sp<WebmElement> SeekEntry(uint64_t id, uint64_t off);
+    static sp<WebmElement> CuePointEntry(uint64_t time, int track, uint64_t off);
+    static sp<WebmElement> SimpleBlock(
+            int trackNum,
+            int16_t timecode,
+            bool key,
+            const uint8_t *data,
+            uint64_t dataSize);
+};
+
+struct WebmUnsigned : public WebmElement {
+    WebmUnsigned(uint64_t id, uint64_t value);
+    const uint64_t mValue;
+    void serializePayload(uint8_t *buf);
+};
+
+struct WebmFloat : public WebmElement {
+    const double mValue;
+    WebmFloat(uint64_t id, float value);
+    WebmFloat(uint64_t id, double value);
+    void serializePayload(uint8_t *buf);
+};
+
+struct WebmBinary : public WebmElement {
+    const sp<ABuffer> mRef;
+    WebmBinary(uint64_t id, const sp<ABuffer> &ref);
+    void serializePayload(uint8_t *buf);
+};
+
+struct WebmString : public WebmElement {
+    const char *const mStr;
+    WebmString(uint64_t id, const char *str);
+    void serializePayload(uint8_t *buf);
+};
+
+struct WebmSimpleBlock : public WebmElement {
+    const int mTrackNum;
+    const int16_t mRelTimecode;
+    const bool mKey;
+    const sp<ABuffer> mRef;
+
+    WebmSimpleBlock(int trackNum, int16_t timecode, bool key, const sp<ABuffer>& orig);
+    void serializePayload(uint8_t *buf);
+};
+
+struct EbmlVoid : public WebmElement {
+    const uint64_t mSizeWidth;
+    EbmlVoid(uint64_t totalSize);
+    int serializePayloadSize(uint8_t *buf);
+    void serializePayload(uint8_t *buf);
+};
+
+struct WebmMaster : public WebmElement {
+    const List<sp<WebmElement> > mChildren;
+    WebmMaster(uint64_t id);
+    WebmMaster(uint64_t id, const List<sp<WebmElement> > &children);
+    int serializePayloadSize(uint8_t *buf);
+    void serializePayload(uint8_t *buf);
+};
+
+} /* namespace android */
+#endif /* WEBMELEMENT_H_ */
diff --git a/media/libstagefright/webm/WebmFrame.cpp b/media/libstagefright/webm/WebmFrame.cpp
new file mode 100644
index 0000000..e5134ed
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrame.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WebmFrame"
+
+#include "WebmFrame.h"
+#include "WebmConstants.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <unistd.h>
+
+using namespace android;
+using namespace webm;
+
+namespace {
+sp<ABuffer> toABuffer(MediaBuffer *mbuf) {
+    sp<ABuffer> abuf = new ABuffer(mbuf->range_length());
+    memcpy(abuf->data(), (uint8_t*) mbuf->data() + mbuf->range_offset(), mbuf->range_length());
+    return abuf;
+}
+}
+
+namespace android {
+
+const sp<WebmFrame> WebmFrame::EOS = new WebmFrame();
+
+WebmFrame::WebmFrame()
+    : mType(kInvalidType),
+      mKey(false),
+      mAbsTimecode(UINT64_MAX),
+      mData(new ABuffer(0)),
+      mEos(true) {
+}
+
+WebmFrame::WebmFrame(int type, bool key, uint64_t absTimecode, MediaBuffer *mbuf)
+    : mType(type),
+      mKey(key),
+      mAbsTimecode(absTimecode),
+      mData(toABuffer(mbuf)),
+      mEos(false) {
+}
+
+sp<WebmElement> WebmFrame::SimpleBlock(uint64_t baseTimecode) const {
+    return new WebmSimpleBlock(
+            mType == kVideoType ? kVideoTrackNum : kAudioTrackNum,
+            mAbsTimecode - baseTimecode,
+            mKey,
+            mData);
+}
+
+bool WebmFrame::operator<(const WebmFrame &other) const {
+    if (this->mEos) {
+        return false;
+    }
+    if (other.mEos) {
+        return true;
+    }
+    if (this->mAbsTimecode == other.mAbsTimecode) {
+        if (this->mType == kAudioType && other.mType == kVideoType) {
+            return true;
+        }
+        if (this->mType == kVideoType && other.mType == kAudioType) {
+            return false;
+        }
+        return false;
+    }
+    return this->mAbsTimecode < other.mAbsTimecode;
+}
+} /* namespace android */
diff --git a/media/libstagefright/webm/WebmFrame.h b/media/libstagefright/webm/WebmFrame.h
new file mode 100644
index 0000000..4f0b055
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrame.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMFRAME_H_
+#define WEBMFRAME_H_
+
+#include "WebmElement.h"
+
+namespace android {
+
+struct WebmFrame : LightRefBase<WebmFrame> {
+public:
+    const int mType;
+    const bool mKey;
+    const uint64_t mAbsTimecode;
+    const sp<ABuffer> mData;
+    const bool mEos;
+
+    WebmFrame();
+    WebmFrame(int type, bool key, uint64_t absTimecode, MediaBuffer *buf);
+    ~WebmFrame() {}
+
+    sp<WebmElement> SimpleBlock(uint64_t baseTimecode) const;
+
+    bool operator<(const WebmFrame &other) const;
+
+    static const sp<WebmFrame> EOS;
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(WebmFrame);
+};
+
+} /* namespace android */
+#endif /* WEBMFRAME_H_ */
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
new file mode 100644
index 0000000..a4b8a42
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WebmFrameThread"
+
+#include "WebmConstants.h"
+#include "WebmFrameThread.h"
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <utils/Log.h>
+#include <inttypes.h>
+
+using namespace webm;
+
+namespace android {
+
+void *WebmFrameThread::wrap(void *arg) {
+    WebmFrameThread *worker = reinterpret_cast<WebmFrameThread*>(arg);
+    worker->run();
+    return NULL;
+}
+
+status_t WebmFrameThread::start() {
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+    pthread_create(&mThread, &attr, WebmFrameThread::wrap, this);
+    pthread_attr_destroy(&attr);
+    return OK;
+}
+
+status_t WebmFrameThread::stop() {
+    void *status;
+    pthread_join(mThread, &status);
+    return (status_t)(intptr_t)status;
+}
+
+//=================================================================================================
+
+WebmFrameSourceThread::WebmFrameSourceThread(
+    int type,
+    LinkedBlockingQueue<const sp<WebmFrame> >& sink)
+    : mType(type), mSink(sink) {
+}
+
+//=================================================================================================
+
+WebmFrameSinkThread::WebmFrameSinkThread(
+        const int& fd,
+        const uint64_t& off,
+        sp<WebmFrameSourceThread> videoThread,
+        sp<WebmFrameSourceThread> audioThread,
+        List<sp<WebmElement> >& cues)
+    : mFd(fd),
+      mSegmentDataStart(off),
+      mVideoFrames(videoThread->mSink),
+      mAudioFrames(audioThread->mSink),
+      mCues(cues),
+      mDone(true) {
+}
+
+WebmFrameSinkThread::WebmFrameSinkThread(
+        const int& fd,
+        const uint64_t& off,
+        LinkedBlockingQueue<const sp<WebmFrame> >& videoSource,
+        LinkedBlockingQueue<const sp<WebmFrame> >& audioSource,
+        List<sp<WebmElement> >& cues)
+    : mFd(fd),
+      mSegmentDataStart(off),
+      mVideoFrames(videoSource),
+      mAudioFrames(audioSource),
+      mCues(cues),
+      mDone(true) {
+}
+
+// Initializes a webm cluster with its starting timecode.
+//
+// frames:
+//   sequence of input audio/video frames received from the source.
+//
+// clusterTimecodeL:
+//   the starting timecode of the cluster; this is the timecode of the first
+//   frame since frames are ordered by timestamp.
+//
+// children:
+//   list to hold child elements in a webm cluster (start timecode and
+//   simple blocks).
+//
+// static
+void WebmFrameSinkThread::initCluster(
+    List<const sp<WebmFrame> >& frames,
+    uint64_t& clusterTimecodeL,
+    List<sp<WebmElement> >& children) {
+    CHECK(!frames.empty() && children.empty());
+
+    const sp<WebmFrame> f = *(frames.begin());
+    clusterTimecodeL = f->mAbsTimecode;
+    WebmUnsigned *clusterTimecode = new WebmUnsigned(kMkvTimecode, clusterTimecodeL);
+    children.clear();
+    children.push_back(clusterTimecode);
+}
+
+void WebmFrameSinkThread::writeCluster(List<sp<WebmElement> >& children) {
+    // children must contain at least one simpleblock and its timecode
+    CHECK_GE(children.size(), 2);
+
+    uint64_t size;
+    sp<WebmElement> cluster = new WebmMaster(kMkvCluster, children);
+    cluster->write(mFd, size);
+    children.clear();
+}
+
+// Write out (possibly multiple) webm cluster(s) from frames split on video key frames.
+//
+// last:
+//   current flush is triggered by EOS instead of a second outstanding video key frame.
+void WebmFrameSinkThread::flushFrames(List<const sp<WebmFrame> >& frames, bool last) {
+    if (frames.empty()) {
+        return;
+    }
+
+    uint64_t clusterTimecodeL;
+    List<sp<WebmElement> > children;
+    initCluster(frames, clusterTimecodeL, children);
+
+    uint64_t cueTime = clusterTimecodeL;
+    off_t fpos = ::lseek(mFd, 0, SEEK_CUR);
+    size_t n = frames.size();
+    if (!last) {
+        // If we are not flushing the last sequence of outstanding frames, flushFrames
+        // must have been called right after we have pushed a second outstanding video key
+        // frame (the last frame), which belongs to the next cluster; also hold back on
+        // flushing the second to last frame before we check its type. An audio frame
+        // should precede the aforementioned video key frame in the next sequence; a video
+        // frame should be the last frame in the current (to-be-flushed) sequence.
+        CHECK_GE(n, 2);
+        n -= 2;
+    }
+
+    for (size_t i = 0; i < n; i++) {
+        const sp<WebmFrame> f = *(frames.begin());
+        if (f->mType == kVideoType && f->mKey) {
+            cueTime = f->mAbsTimecode;
+        }
+
+        if (f->mAbsTimecode - clusterTimecodeL > INT16_MAX) {
+            writeCluster(children);
+            initCluster(frames, clusterTimecodeL, children);
+        }
+
+        frames.erase(frames.begin());
+        children.push_back(f->SimpleBlock(clusterTimecodeL));
+    }
+
+    // equivalent to last==false
+    if (!frames.empty()) {
+        // decide whether to write out the second to last frame.
+        const sp<WebmFrame> secondLastFrame = *(frames.begin());
+        if (secondLastFrame->mType == kVideoType) {
+            frames.erase(frames.begin());
+            children.push_back(secondLastFrame->SimpleBlock(clusterTimecodeL));
+        }
+    }
+
+    writeCluster(children);
+    sp<WebmElement> cuePoint = WebmElement::CuePointEntry(cueTime, 1, fpos - mSegmentDataStart);
+    mCues.push_back(cuePoint);
+}
+
+status_t WebmFrameSinkThread::start() {
+    mDone = false;
+    return WebmFrameThread::start();
+}
+
+status_t WebmFrameSinkThread::stop() {
+    mDone = true;
+    mVideoFrames.push(WebmFrame::EOS);
+    mAudioFrames.push(WebmFrame::EOS);
+    return WebmFrameThread::stop();
+}
+
+void WebmFrameSinkThread::run() {
+    int numVideoKeyFrames = 0;
+    List<const sp<WebmFrame> > outstandingFrames;
+    while (!mDone) {
+        ALOGV("wait v frame");
+        const sp<WebmFrame> videoFrame = mVideoFrames.peek();
+        ALOGV("v frame: %p", videoFrame.get());
+
+        ALOGV("wait a frame");
+        const sp<WebmFrame> audioFrame = mAudioFrames.peek();
+        ALOGV("a frame: %p", audioFrame.get());
+
+        if (videoFrame->mEos && audioFrame->mEos) {
+            break;
+        }
+
+        if (*audioFrame < *videoFrame) {
+            ALOGV("take a frame");
+            mAudioFrames.take();
+            outstandingFrames.push_back(audioFrame);
+        } else {
+            ALOGV("take v frame");
+            mVideoFrames.take();
+            outstandingFrames.push_back(videoFrame);
+            if (videoFrame->mKey)
+                numVideoKeyFrames++;
+        }
+
+        if (numVideoKeyFrames == 2) {
+            flushFrames(outstandingFrames, /* last = */ false);
+            numVideoKeyFrames--;
+        }
+    }
+    ALOGV("flushing last cluster (size %zu)", outstandingFrames.size());
+    flushFrames(outstandingFrames, /* last = */ true);
+    mDone = true;
+}
+
+//=================================================================================================
+
+static const int64_t kInitialDelayTimeUs = 700000LL;
+
+void WebmFrameMediaSourceThread::clearFlags() {
+    mDone = false;
+    mPaused = false;
+    mResumed = false;
+    mStarted = false;
+    mReachedEOS = false;
+}
+
+WebmFrameMediaSourceThread::WebmFrameMediaSourceThread(
+        const sp<MediaSource>& source,
+        int type,
+        LinkedBlockingQueue<const sp<WebmFrame> >& sink,
+        uint64_t timeCodeScale,
+        int64_t startTimeRealUs,
+        int32_t startTimeOffsetMs,
+        int numTracks,
+        bool realTimeRecording)
+    : WebmFrameSourceThread(type, sink),
+      mSource(source),
+      mTimeCodeScale(timeCodeScale),
+      mTrackDurationUs(0) {
+    clearFlags();
+    mStartTimeUs = startTimeRealUs;
+    if (realTimeRecording && numTracks > 1) {
+        /*
+         * Copied from MPEG4Writer
+         *
+         * This extra delay of accepting incoming audio/video signals
+         * helps to align a/v start time at the beginning of a recording
+         * session, and it also helps eliminate the "recording" sound for
+         * camcorder applications.
+         *
+         * If client does not set the start time offset, we fall back to
+         * use the default initial delay value.
+         */
+        int64_t startTimeOffsetUs = startTimeOffsetMs * 1000LL;
+        if (startTimeOffsetUs < 0) {  // Start time offset was not set
+            startTimeOffsetUs = kInitialDelayTimeUs;
+        }
+        mStartTimeUs += startTimeOffsetUs;
+        ALOGI("Start time offset: %" PRId64 " us", startTimeOffsetUs);
+    }
+}
+
+status_t WebmFrameMediaSourceThread::start() {
+    sp<MetaData> meta = new MetaData;
+    meta->setInt64(kKeyTime, mStartTimeUs);
+    status_t err = mSource->start(meta.get());
+    if (err != OK) {
+        mDone = true;
+        mReachedEOS = true;
+        return err;
+    } else {
+        mStarted = true;
+        return WebmFrameThread::start();
+    }
+}
+
+status_t WebmFrameMediaSourceThread::resume() {
+    if (!mDone && mPaused) {
+        mPaused = false;
+        mResumed = true;
+    }
+    return OK;
+}
+
+status_t WebmFrameMediaSourceThread::pause() {
+    if (mStarted) {
+        mPaused = true;
+    }
+    return OK;
+}
+
+status_t WebmFrameMediaSourceThread::stop() {
+    if (mStarted) {
+        mStarted = false;
+        mDone = true;
+        mSource->stop();
+        return WebmFrameThread::stop();
+    }
+    return OK;
+}
+
+void WebmFrameMediaSourceThread::run() {
+    int32_t count = 0;
+    int64_t timestampUs = 0xdeadbeef;
+    int64_t lastTimestampUs = 0; // Previous sample time stamp
+    int64_t lastDurationUs = 0; // Previous sample duration
+    int64_t previousPausedDurationUs = 0;
+
+    const uint64_t kUninitialized = 0xffffffffffffffffL;
+    mStartTimeUs = kUninitialized;
+
+    status_t err = OK;
+    MediaBuffer *buffer;
+    while (!mDone && (err = mSource->read(&buffer, NULL)) == OK) {
+        if (buffer->range_length() == 0) {
+            buffer->release();
+            buffer = NULL;
+            continue;
+        }
+
+        sp<MetaData> md = buffer->meta_data();
+        CHECK(md->findInt64(kKeyTime, &timestampUs));
+        if (mStartTimeUs == kUninitialized) {
+            mStartTimeUs = timestampUs;
+        }
+        timestampUs -= mStartTimeUs;
+
+        if (mPaused && !mResumed) {
+            lastDurationUs = timestampUs - lastTimestampUs;
+            lastTimestampUs = timestampUs;
+            buffer->release();
+            buffer = NULL;
+            continue;
+        }
+        ++count;
+
+        // adjust time-stamps after pause/resume
+        if (mResumed) {
+            int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+            CHECK_GE(durExcludingEarlierPausesUs, 0ll);
+            int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+            CHECK_GE(pausedDurationUs, lastDurationUs);
+            previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+            mResumed = false;
+        }
+        timestampUs -= previousPausedDurationUs;
+        CHECK_GE(timestampUs, 0ll);
+
+        int32_t isSync = false;
+        md->findInt32(kKeyIsSyncFrame, &isSync);
+        const sp<WebmFrame> f = new WebmFrame(
+            mType,
+            isSync,
+            timestampUs * 1000 / mTimeCodeScale,
+            buffer);
+        mSink.push(f);
+
+        ALOGV(
+            "%s %s frame at %" PRId64 " size %zu\n",
+            mType == kVideoType ? "video" : "audio",
+            isSync ? "I" : "P",
+            timestampUs * 1000 / mTimeCodeScale,
+            buffer->range_length());
+
+        buffer->release();
+        buffer = NULL;
+
+        if (timestampUs > mTrackDurationUs) {
+            mTrackDurationUs = timestampUs;
+        }
+        lastDurationUs = timestampUs - lastTimestampUs;
+        lastTimestampUs = timestampUs;
+    }
+
+    mTrackDurationUs += lastDurationUs;
+    mSink.push(WebmFrame::EOS);
+}
+}
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/WebmFrameThread.h
new file mode 100644
index 0000000..d65d9b7
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrameThread.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMFRAMETHREAD_H_
+#define WEBMFRAMETHREAD_H_
+
+#include "WebmFrame.h"
+#include "LinkedBlockingQueue.h"
+
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaSource.h>
+
+#include <utils/List.h>
+#include <utils/Errors.h>
+
+#include <pthread.h>
+
+namespace android {
+
+class WebmFrameThread : public LightRefBase<WebmFrameThread> {
+public:
+    virtual void run() = 0;
+    virtual bool running() { return false; }
+    virtual status_t start();
+    virtual status_t pause() { return OK; }
+    virtual status_t resume() { return OK; }
+    virtual status_t stop();
+    virtual ~WebmFrameThread() { stop(); }
+    static void *wrap(void *arg);
+
+protected:
+    WebmFrameThread()
+        : mThread(0) {
+    }
+
+private:
+    pthread_t mThread;
+    DISALLOW_EVIL_CONSTRUCTORS(WebmFrameThread);
+};
+
+//=================================================================================================
+
+class WebmFrameSourceThread;
+class WebmFrameSinkThread : public WebmFrameThread {
+public:
+    WebmFrameSinkThread(
+            const int& fd,
+            const uint64_t& off,
+            sp<WebmFrameSourceThread> videoThread,
+            sp<WebmFrameSourceThread> audioThread,
+            List<sp<WebmElement> >& cues);
+
+    WebmFrameSinkThread(
+            const int& fd,
+            const uint64_t& off,
+            LinkedBlockingQueue<const sp<WebmFrame> >& videoSource,
+            LinkedBlockingQueue<const sp<WebmFrame> >& audioSource,
+            List<sp<WebmElement> >& cues);
+
+    void run();
+    bool running() {
+        return !mDone;
+    }
+    status_t start();
+    status_t stop();
+
+private:
+    const int& mFd;
+    const uint64_t& mSegmentDataStart;
+    LinkedBlockingQueue<const sp<WebmFrame> >& mVideoFrames;
+    LinkedBlockingQueue<const sp<WebmFrame> >& mAudioFrames;
+    List<sp<WebmElement> >& mCues;
+
+    volatile bool mDone;
+
+    static void initCluster(
+            List<const sp<WebmFrame> >& frames,
+            uint64_t& clusterTimecodeL,
+            List<sp<WebmElement> >& children);
+    void writeCluster(List<sp<WebmElement> >& children);
+    void flushFrames(List<const sp<WebmFrame> >& frames, bool last);
+};
+
+//=================================================================================================
+
+class WebmFrameSourceThread : public WebmFrameThread {
+public:
+    WebmFrameSourceThread(int type, LinkedBlockingQueue<const sp<WebmFrame> >& sink);
+    virtual int64_t getDurationUs() = 0;
+protected:
+    const int mType;
+    LinkedBlockingQueue<const sp<WebmFrame> >& mSink;
+
+    friend class WebmFrameSinkThread;
+};
+
+//=================================================================================================
+
+class WebmFrameEmptySourceThread : public WebmFrameSourceThread {
+public:
+    WebmFrameEmptySourceThread(int type, LinkedBlockingQueue<const sp<WebmFrame> >& sink)
+        : WebmFrameSourceThread(type, sink) {
+    }
+    void run() { mSink.push(WebmFrame::EOS); }
+    int64_t getDurationUs() { return 0; }
+};
+
+//=================================================================================================
+
+class WebmFrameMediaSourceThread: public WebmFrameSourceThread {
+public:
+    WebmFrameMediaSourceThread(
+            const sp<MediaSource>& source,
+            int type,
+            LinkedBlockingQueue<const sp<WebmFrame> >& sink,
+            uint64_t timeCodeScale,
+            int64_t startTimeRealUs,
+            int32_t startTimeOffsetMs,
+            int numPeers,
+            bool realTimeRecording);
+
+    void run();
+    status_t start();
+    status_t resume();
+    status_t pause();
+    status_t stop();
+    int64_t getDurationUs() {
+        return mTrackDurationUs;
+    }
+
+private:
+    const sp<MediaSource> mSource;
+    const uint64_t mTimeCodeScale;
+    uint64_t mStartTimeUs;
+
+    volatile bool mDone;
+    volatile bool mPaused;
+    volatile bool mResumed;
+    volatile bool mStarted;
+    volatile bool mReachedEOS;
+    int64_t mTrackDurationUs;
+
+    void clearFlags();
+};
+} /* namespace android */
+
+#endif /* WEBMFRAMETHREAD_H_ */
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
new file mode 100644
index 0000000..03cf92a
--- /dev/null
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "WebmWriter"
+
+#include "EbmlUtil.h"
+#include "WebmWriter.h"
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <utils/Errors.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <inttypes.h>
+
+using namespace webm;
+
+namespace {
+size_t XiphLaceCodeLen(size_t size) {
+    return size / 0xff + 1;
+}
+
+size_t XiphLaceEnc(uint8_t *buf, size_t size) {
+    size_t i;
+    for (i = 0; size >= 0xff; ++i, size -= 0xff) {
+        buf[i] = 0xff;
+    }
+    buf[i++] = size;
+    return i;
+}
+}
+
+namespace android {
+
+static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
+
+WebmWriter::WebmWriter(int fd)
+    : mFd(dup(fd)),
+      mInitCheck(mFd < 0 ? NO_INIT : OK),
+      mTimeCodeScale(1000000),
+      mStartTimestampUs(0),
+      mStartTimeOffsetMs(0),
+      mSegmentOffset(0),
+      mSegmentDataStart(0),
+      mInfoOffset(0),
+      mInfoSize(0),
+      mTracksOffset(0),
+      mCuesOffset(0),
+      mPaused(false),
+      mStarted(false),
+      mIsFileSizeLimitExplicitlyRequested(false),
+      mIsRealTimeRecording(false),
+      mStreamableFile(true),
+      mEstimatedCuesSize(0) {
+    mStreams[kAudioIndex] = WebmStream(kAudioType, "Audio", &WebmWriter::audioTrack);
+    mStreams[kVideoIndex] = WebmStream(kVideoType, "Video", &WebmWriter::videoTrack);
+    mSinkThread = new WebmFrameSinkThread(
+            mFd,
+            mSegmentDataStart,
+            mStreams[kVideoIndex].mSink,
+            mStreams[kAudioIndex].mSink,
+            mCuePoints);
+}
+
+WebmWriter::WebmWriter(const char *filename)
+    : mInitCheck(NO_INIT),
+      mTimeCodeScale(1000000),
+      mStartTimestampUs(0),
+      mStartTimeOffsetMs(0),
+      mSegmentOffset(0),
+      mSegmentDataStart(0),
+      mInfoOffset(0),
+      mInfoSize(0),
+      mTracksOffset(0),
+      mCuesOffset(0),
+      mPaused(false),
+      mStarted(false),
+      mIsFileSizeLimitExplicitlyRequested(false),
+      mIsRealTimeRecording(false),
+      mStreamableFile(true),
+      mEstimatedCuesSize(0) {
+    mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+    if (mFd >= 0) {
+        ALOGV("fd %d; flags: %o", mFd, fcntl(mFd, F_GETFL, 0));
+        mInitCheck = OK;
+    }
+    mStreams[kAudioIndex] = WebmStream(kAudioType, "Audio", &WebmWriter::audioTrack);
+    mStreams[kVideoIndex] = WebmStream(kVideoType, "Video", &WebmWriter::videoTrack);
+    mSinkThread = new WebmFrameSinkThread(
+            mFd,
+            mSegmentDataStart,
+            mStreams[kVideoIndex].mSink,
+            mStreams[kAudioIndex].mSink,
+            mCuePoints);
+}
+
+// static
+sp<WebmElement> WebmWriter::videoTrack(const sp<MetaData>& md) {
+    int32_t width, height;
+    CHECK(md->findInt32(kKeyWidth, &width));
+    CHECK(md->findInt32(kKeyHeight, &height));
+    return WebmElement::VideoTrackEntry(width, height);
+}
+
+// static
+sp<WebmElement> WebmWriter::audioTrack(const sp<MetaData>& md) {
+    int32_t nChannels, samplerate;
+    uint32_t type;
+    const void *headerData1;
+    const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
+            'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
+    const void *headerData3;
+    size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
+
+    CHECK(md->findInt32(kKeyChannelCount, &nChannels));
+    CHECK(md->findInt32(kKeySampleRate, &samplerate));
+    CHECK(md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1));
+    CHECK(md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3));
+
+    size_t codecPrivateSize = 1;
+    codecPrivateSize += XiphLaceCodeLen(headerSize1);
+    codecPrivateSize += XiphLaceCodeLen(headerSize2);
+    codecPrivateSize += headerSize1 + headerSize2 + headerSize3;
+
+    off_t off = 0;
+    sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
+    uint8_t *codecPrivateData = codecPrivateBuf->data();
+    codecPrivateData[off++] = 2;
+
+    off += XiphLaceEnc(codecPrivateData + off, headerSize1);
+    off += XiphLaceEnc(codecPrivateData + off, headerSize2);
+
+    memcpy(codecPrivateData + off, headerData1, headerSize1);
+    off += headerSize1;
+    memcpy(codecPrivateData + off, headerData2, headerSize2);
+    off += headerSize2;
+    memcpy(codecPrivateData + off, headerData3, headerSize3);
+
+    sp<WebmElement> entry = WebmElement::AudioTrackEntry(
+            nChannels,
+            samplerate,
+            codecPrivateBuf);
+    return entry;
+}
+
+size_t WebmWriter::numTracks() {
+    Mutex::Autolock autolock(mLock);
+
+    size_t numTracks = 0;
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        if (mStreams[i].mTrackEntry != NULL) {
+            numTracks++;
+        }
+    }
+
+    return numTracks;
+}
+
+uint64_t WebmWriter::estimateCuesSize(int32_t bitRate) {
+    // This implementation is based on estimateMoovBoxSize in MPEG4Writer.
+    //
+    // Statistical analysis shows that metadata usually accounts
+    // for a small portion of the total file size, usually < 0.6%.
+
+    // The default MIN_MOOV_BOX_SIZE is set to 0.6% x 1MB / 2,
+    // where 1MB is the common file size limit for MMS application.
+    // The default MAX_MOOV_BOX_SIZE value is based on about 3
+    // minute video recording with a bit rate about 3 Mbps, because
+    // statistics also show that most of the video captured are going
+    // to be less than 3 minutes.
+
+    // If the estimation is wrong, we will pay the price of wasting
+    // some reserved space. This should not happen so often statistically.
+    static const int32_t factor = 2;
+    static const int64_t MIN_CUES_SIZE = 3 * 1024;  // 3 KB
+    static const int64_t MAX_CUES_SIZE = (180 * 3000000 * 6LL / 8000);
+    int64_t size = MIN_CUES_SIZE;
+
+    // Max file size limit is set
+    if (mMaxFileSizeLimitBytes != 0 && mIsFileSizeLimitExplicitlyRequested) {
+        size = mMaxFileSizeLimitBytes * 6 / 1000;
+    }
+
+    // Max file duration limit is set
+    if (mMaxFileDurationLimitUs != 0) {
+        if (bitRate > 0) {
+            int64_t size2 = ((mMaxFileDurationLimitUs * bitRate * 6) / 1000 / 8000000);
+            if (mMaxFileSizeLimitBytes != 0 && mIsFileSizeLimitExplicitlyRequested) {
+                // When both file size and duration limits are set,
+                // we use the smaller limit of the two.
+                if (size > size2) {
+                    size = size2;
+                }
+            } else {
+                // Only max file duration limit is set
+                size = size2;
+            }
+        }
+    }
+
+    if (size < MIN_CUES_SIZE) {
+        size = MIN_CUES_SIZE;
+    }
+
+    // Any long-duration recording will probably end up with a
+    // non-streamable webm file.
+    if (size > MAX_CUES_SIZE) {
+        size = MAX_CUES_SIZE;
+    }
+
+    ALOGV("limits: %" PRId64 "/%" PRId64 " bytes/us,"
+            " bit rate: %d bps and the estimated cues size %" PRId64 " bytes",
+            mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
+    return factor * size;
+}
+
+void WebmWriter::initStream(size_t idx) {
+    if (mStreams[idx].mThread != NULL) {
+        return;
+    }
+    if (mStreams[idx].mSource == NULL) {
+        ALOGV("adding dummy source ... ");
+        mStreams[idx].mThread = new WebmFrameEmptySourceThread(
+                mStreams[idx].mType, mStreams[idx].mSink);
+    } else {
+        ALOGV("adding source %p", mStreams[idx].mSource.get());
+        mStreams[idx].mThread = new WebmFrameMediaSourceThread(
+                mStreams[idx].mSource,
+                mStreams[idx].mType,
+                mStreams[idx].mSink,
+                mTimeCodeScale,
+                mStartTimestampUs,
+                mStartTimeOffsetMs,
+                numTracks(),
+                mIsRealTimeRecording);
+    }
+}
+
+void WebmWriter::release() {
+    close(mFd);
+    mFd = -1;
+    mInitCheck = NO_INIT;
+    mStarted = false;
+}
+
+status_t WebmWriter::reset() {
+    if (mInitCheck != OK) {
+        return OK;
+    } else {
+        if (!mStarted) {
+            release();
+            return OK;
+        }
+    }
+
+    status_t err = OK;
+    int64_t maxDurationUs = 0;
+    int64_t minDurationUs = 0x7fffffffffffffffLL;
+    for (int i = 0; i < kMaxStreams; ++i) {
+        if (mStreams[i].mThread == NULL) {
+            continue;
+        }
+
+        status_t status = mStreams[i].mThread->stop();
+        if (err == OK && status != OK) {
+            err = status;
+        }
+
+        int64_t durationUs = mStreams[i].mThread->getDurationUs();
+        if (durationUs > maxDurationUs) {
+            maxDurationUs = durationUs;
+        }
+        if (durationUs < minDurationUs) {
+            minDurationUs = durationUs;
+        }
+    }
+
+    if (numTracks() > 1) {
+        ALOGD("Duration from tracks range is [%" PRId64 ", %" PRId64 "] us", minDurationUs, maxDurationUs);
+    }
+
+    mSinkThread->stop();
+
+    // Do not write out movie header on error.
+    if (err != OK) {
+        release();
+        return err;
+    }
+
+    sp<WebmElement> cues = new WebmMaster(kMkvCues, mCuePoints);
+    uint64_t cuesSize = cues->totalSize();
+    // TRICKY Even when the cues do fit in the space we reserved, if they do not fit
+    // perfectly, we still need to check if there is enough "extra space" to write an
+    // EBML void element.
+    if (cuesSize != mEstimatedCuesSize && cuesSize > mEstimatedCuesSize - kMinEbmlVoidSize) {
+        mCuesOffset = ::lseek(mFd, 0, SEEK_CUR);
+        cues->write(mFd, cuesSize);
+    } else {
+        uint64_t spaceSize;
+        ::lseek(mFd, mCuesOffset, SEEK_SET);
+        cues->write(mFd, cuesSize);
+        sp<WebmElement> space = new EbmlVoid(mEstimatedCuesSize - cuesSize);
+        space->write(mFd, spaceSize);
+    }
+
+    mCuePoints.clear();
+    mStreams[kVideoIndex].mSink.clear();
+    mStreams[kAudioIndex].mSink.clear();
+
+    uint8_t bary[sizeof(uint64_t)];
+    uint64_t totalSize = ::lseek(mFd, 0, SEEK_END);
+    uint64_t segmentSize = totalSize - mSegmentDataStart;
+    ::lseek(mFd, mSegmentOffset + sizeOf(kMkvSegment), SEEK_SET);
+    uint64_t segmentSizeCoded = encodeUnsigned(segmentSize, sizeOf(kMkvUnknownLength));
+    serializeCodedUnsigned(segmentSizeCoded, bary);
+    ::write(mFd, bary, sizeOf(kMkvUnknownLength));
+
+    uint64_t size;
+    uint64_t durationOffset = mInfoOffset + sizeOf(kMkvInfo) + sizeOf(mInfoSize)
+        + sizeOf(kMkvSegmentDuration) + sizeOf(sizeof(double));
+    sp<WebmElement> duration = new WebmFloat(
+            kMkvSegmentDuration,
+            (double) (maxDurationUs * 1000 / mTimeCodeScale));
+    duration->serializePayload(bary);
+    ::lseek(mFd, durationOffset, SEEK_SET);
+    ::write(mFd, bary, sizeof(double));
+
+    List<sp<WebmElement> > seekEntries;
+    seekEntries.push_back(WebmElement::SeekEntry(kMkvInfo, mInfoOffset - mSegmentDataStart));
+    seekEntries.push_back(WebmElement::SeekEntry(kMkvTracks, mTracksOffset - mSegmentDataStart));
+    seekEntries.push_back(WebmElement::SeekEntry(kMkvCues, mCuesOffset - mSegmentDataStart));
+    sp<WebmElement> seekHead = new WebmMaster(kMkvSeekHead, seekEntries);
+
+    uint64_t metaSeekSize;
+    ::lseek(mFd, mSegmentDataStart, SEEK_SET);
+    seekHead->write(mFd, metaSeekSize);
+
+    uint64_t spaceSize;
+    sp<WebmElement> space = new EbmlVoid(kMaxMetaSeekSize - metaSeekSize);
+    space->write(mFd, spaceSize);
+
+    release();
+    return err;
+}
+
+status_t WebmWriter::addSource(const sp<MediaSource> &source) {
+    Mutex::Autolock l(mLock);
+    if (mStarted) {
+        ALOGE("Attempt to add source AFTER recording is started");
+        return UNKNOWN_ERROR;
+    }
+
+    // At most 2 tracks can be supported.
+    if (mStreams[kVideoIndex].mTrackEntry != NULL
+            && mStreams[kAudioIndex].mTrackEntry != NULL) {
+        ALOGE("Too many tracks (2) to add");
+        return ERROR_UNSUPPORTED;
+    }
+
+    CHECK(source != NULL);
+
+    // A track of type other than video or audio is not supported.
+    const char *mime;
+    source->getFormat()->findCString(kKeyMIMEType, &mime);
+    const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
+    const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;
+
+    size_t streamIndex;
+    if (!strncasecmp(mime, vp8, strlen(vp8))) {
+        streamIndex = kVideoIndex;
+    } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
+        streamIndex = kAudioIndex;
+    } else {
+        ALOGE("Track (%s) other than %s or %s is not supported", mime, vp8, vorbis);
+        return ERROR_UNSUPPORTED;
+    }
+
+    // No more than one video or one audio track is supported.
+    if (mStreams[streamIndex].mTrackEntry != NULL) {
+        ALOGE("%s track already exists", mStreams[streamIndex].mName);
+        return ERROR_UNSUPPORTED;
+    }
+
+    // This is the first track of either audio or video.
+    // Go ahead to add the track.
+    mStreams[streamIndex].mSource = source;
+    mStreams[streamIndex].mTrackEntry = mStreams[streamIndex].mMakeTrack(source->getFormat());
+
+    return OK;
+}
+
+status_t WebmWriter::start(MetaData *params) {
+    if (mInitCheck != OK) {
+        return UNKNOWN_ERROR;
+    }
+
+    if (mStreams[kVideoIndex].mTrackEntry == NULL
+            && mStreams[kAudioIndex].mTrackEntry == NULL) {
+        ALOGE("No source added");
+        return INVALID_OPERATION;
+    }
+
+    if (mMaxFileSizeLimitBytes != 0) {
+        mIsFileSizeLimitExplicitlyRequested = true;
+    }
+
+    if (params) {
+        int32_t isRealTimeRecording;
+        params->findInt32(kKeyRealTimeRecording, &isRealTimeRecording);
+        mIsRealTimeRecording = isRealTimeRecording;
+    }
+
+    if (mStarted) {
+        if (mPaused) {
+            mPaused = false;
+            mStreams[kAudioIndex].mThread->resume();
+            mStreams[kVideoIndex].mThread->resume();
+        }
+        return OK;
+    }
+
+    if (params) {
+        int32_t tcsl;
+        if (params->findInt32(kKeyTimeScale, &tcsl)) {
+            mTimeCodeScale = tcsl;
+        }
+    }
+    CHECK_GT(mTimeCodeScale, 0);
+    ALOGV("movie time scale: %" PRIu64, mTimeCodeScale);
+
+    /*
+     * When the requested file size limit is small, the priority
+     * is to meet the file size limit requirement, rather than
+     * to make the file streamable. mStreamableFile does not tell
+     * whether the actual recorded file is streamable or not.
+     */
+    mStreamableFile = (!mMaxFileSizeLimitBytes)
+        || (mMaxFileSizeLimitBytes >= kMinStreamableFileSizeInBytes);
+
+    /*
+     * Write various metadata.
+     */
+    sp<WebmElement> ebml, segment, info, seekHead, tracks, cues;
+    ebml = WebmElement::EbmlHeader();
+    segment = new WebmMaster(kMkvSegment);
+    seekHead = new EbmlVoid(kMaxMetaSeekSize);
+    info = WebmElement::SegmentInfo(mTimeCodeScale, 0);
+
+    List<sp<WebmElement> > children;
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        if (mStreams[i].mTrackEntry != NULL) {
+            children.push_back(mStreams[i].mTrackEntry);
+        }
+    }
+    tracks = new WebmMaster(kMkvTracks, children);
+
+    if (!mStreamableFile) {
+        cues = NULL;
+    } else {
+        int32_t bitRate = -1;
+        if (params) {
+            params->findInt32(kKeyBitRate, &bitRate);
+        }
+        mEstimatedCuesSize = estimateCuesSize(bitRate);
+        CHECK_GE(mEstimatedCuesSize, 8);
+        cues = new EbmlVoid(mEstimatedCuesSize);
+    }
+
+    sp<WebmElement> elems[] = { ebml, segment, seekHead, info, tracks, cues };
+    size_t nElems = sizeof(elems) / sizeof(elems[0]);
+    uint64_t offsets[nElems];
+    uint64_t sizes[nElems];
+    for (uint32_t i = 0; i < nElems; i++) {
+        WebmElement *e = elems[i].get();
+        if (!e) {
+            continue;
+        }
+
+        uint64_t size;
+        offsets[i] = ::lseek(mFd, 0, SEEK_CUR);
+        sizes[i] = e->mSize;
+        e->write(mFd, size);
+    }
+
+    mSegmentOffset = offsets[1];
+    mSegmentDataStart = offsets[2];
+    mInfoOffset = offsets[3];
+    mInfoSize = sizes[3];
+    mTracksOffset = offsets[4];
+    mCuesOffset = offsets[5];
+
+    // start threads
+    if (params) {
+        params->findInt64(kKeyTime, &mStartTimestampUs);
+    }
+
+    initStream(kAudioIndex);
+    initStream(kVideoIndex);
+
+    mStreams[kAudioIndex].mThread->start();
+    mStreams[kVideoIndex].mThread->start();
+    mSinkThread->start();
+
+    mStarted = true;
+    return OK;
+}
+
+status_t WebmWriter::pause() {
+    if (mInitCheck != OK) {
+        return OK;
+    }
+    mPaused = true;
+    status_t err = OK;
+    for (int i = 0; i < kMaxStreams; ++i) {
+        if (mStreams[i].mThread == NULL) {
+            continue;
+        }
+        status_t status = mStreams[i].mThread->pause();
+        if (status != OK) {
+            err = status;
+        }
+    }
+    return err;
+}
+
+status_t WebmWriter::stop() {
+    return reset();
+}
+
+bool WebmWriter::reachedEOS() {
+    return !mSinkThread->running();
+}
+} /* namespace android */
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
new file mode 100644
index 0000000..529dec8
--- /dev/null
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMWRITER_H_
+#define WEBMWRITER_H_
+
+#include "WebmConstants.h"
+#include "WebmFrameThread.h"
+#include "LinkedBlockingQueue.h"
+
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaWriter.h>
+
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+
+#include <stdint.h>
+
+using namespace webm;
+
+namespace android {
+
+class WebmWriter : public MediaWriter {
+public:
+    WebmWriter(int fd);
+    WebmWriter(const char *filename);
+    ~WebmWriter() { reset(); }
+
+
+    status_t addSource(const sp<MediaSource> &source);
+    status_t start(MetaData *param = NULL);
+    status_t stop();
+    status_t pause();
+    bool reachedEOS();
+
+    void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
+    int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+
+private:
+    int mFd;
+    status_t mInitCheck;
+
+    uint64_t mTimeCodeScale;
+    int64_t mStartTimestampUs;
+    int32_t mStartTimeOffsetMs;
+
+    uint64_t mSegmentOffset;
+    uint64_t mSegmentDataStart;
+    uint64_t mInfoOffset;
+    uint64_t mInfoSize;
+    uint64_t mTracksOffset;
+    uint64_t mCuesOffset;
+
+    bool mPaused;
+    bool mStarted;
+    bool mIsFileSizeLimitExplicitlyRequested;
+    bool mIsRealTimeRecording;
+    bool mStreamableFile;
+    uint64_t mEstimatedCuesSize;
+
+    Mutex mLock;
+    List<sp<WebmElement> > mCuePoints;
+
+    enum {
+        kAudioIndex     =  0,
+        kVideoIndex     =  1,
+        kMaxStreams     =  2,
+    };
+
+    struct WebmStream {
+        int mType;
+        const char *mName;
+        sp<WebmElement> (*mMakeTrack)(const sp<MetaData>&);
+
+        sp<MediaSource> mSource;
+        sp<WebmElement> mTrackEntry;
+        sp<WebmFrameSourceThread> mThread;
+        LinkedBlockingQueue<const sp<WebmFrame> > mSink;
+
+        WebmStream()
+            : mType(kInvalidType),
+              mName("Invalid"),
+              mMakeTrack(NULL) {
+        }
+
+        WebmStream(int type, const char *name, sp<WebmElement> (*makeTrack)(const sp<MetaData>&))
+            : mType(type),
+              mName(name),
+              mMakeTrack(makeTrack) {
+        }
+
+        WebmStream &operator=(const WebmStream &other) {
+            mType = other.mType;
+            mName = other.mName;
+            mMakeTrack = other.mMakeTrack;
+            return *this;
+        }
+    };
+    WebmStream mStreams[kMaxStreams];
+
+    sp<WebmFrameSinkThread> mSinkThread;
+
+    size_t numTracks();
+    uint64_t estimateCuesSize(int32_t bitRate);
+    void initStream(size_t idx);
+    void release();
+    status_t reset();
+
+    static sp<WebmElement> videoTrack(const sp<MetaData>& md);
+    static sp<WebmElement> audioTrack(const sp<MetaData>& md);
+
+    DISALLOW_EVIL_CONSTRUCTORS(WebmWriter);
+};
+
+} /* namespace android */
+#endif /* WEBMWRITER_H_ */
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 286ea13..2cb4786 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -29,6 +29,7 @@
 #include <binder/IServiceManager.h>
 #include <cutils/properties.h>
 #include <media/IHDCP.h>
+#include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -749,7 +750,8 @@
 
     mExtractor = new NuMediaExtractor;
 
-    status_t err = mExtractor->setDataSource(mMediaPath.c_str());
+    status_t err = mExtractor->setDataSource(
+            NULL /* httpService */, mMediaPath.c_str());
 
     if (err != OK) {
         return err;
@@ -1053,7 +1055,7 @@
     err = source->setMaxAcquiredBufferCount(numInputBuffers);
     CHECK_EQ(err, (status_t)OK);
 
-    mBufferQueue = source->getBufferQueue();
+    mProducer = source->getProducer();
 
     return OK;
 }
@@ -1077,7 +1079,7 @@
 }
 
 sp<IGraphicBufferProducer> WifiDisplaySource::PlaybackSession::getSurfaceTexture() {
-    return mBufferQueue;
+    return mProducer;
 }
 
 void WifiDisplaySource::PlaybackSession::requestIDRFrame() {
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 5c8ee94..2824143 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -25,7 +25,6 @@
 namespace android {
 
 struct ABuffer;
-struct BufferQueue;
 struct IHDCP;
 struct IGraphicBufferProducer;
 struct MediaPuller;
@@ -111,7 +110,7 @@
 
     int64_t mLastLifesignUs;
 
-    sp<BufferQueue> mBufferQueue;
+    sp<IGraphicBufferProducer> mProducer;
 
     KeyedVector<size_t, sp<Track> > mTracks;
     ssize_t mVideoTrackIndex;
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
index cc8dee3..59d7e6e 100644
--- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp
+++ b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
@@ -79,6 +79,8 @@
 
     ALOGV("stopping");
 
+    status_t err = mSource->stop();
+
     if (mLooper != NULL) {
         mLooper->stop();
         mLooper.clear();
@@ -92,7 +94,6 @@
         mBuffer = NULL;
     }
 
-    status_t err = mSource->stop();
 
     ALOGV("stopped");
 
diff --git a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk
index b3f7b1b..bb86dfc 100644
--- a/media/libstagefright/yuv/Android.mk
+++ b/media/libstagefright/yuv/Android.mk
@@ -12,5 +12,7 @@
 LOCAL_MODULE:= libstagefright_yuv
 
 
+LOCAL_CFLAGS += -Werror
+
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp
index 7b9000b..bb3e2fd 100644
--- a/media/libstagefright/yuv/YUVImage.cpp
+++ b/media/libstagefright/yuv/YUVImage.cpp
@@ -226,8 +226,8 @@
             &ySrcOffsetIncrement, &uSrcOffsetIncrement, &vSrcOffsetIncrement);
 
     int32_t yDestOffsetIncrement;
-    int32_t uDestOffsetIncrement;
-    int32_t vDestOffsetIncrement;
+    int32_t uDestOffsetIncrement = 0;
+    int32_t vDestOffsetIncrement = 0;
     destImage.getOffsetIncrementsPerDataRow(
             &yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
 
@@ -309,7 +309,7 @@
 
     int32_t yDestOffsetIncrement;
     int32_t uDestOffsetIncrement;
-    int32_t vDestOffsetIncrement;
+    int32_t vDestOffsetIncrement = 0;
     destImage.getOffsetIncrementsPerDataRow(
             &yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
 
@@ -393,9 +393,9 @@
     fprintf(fp, "255\n");
     for (int32_t y = 0; y < mHeight; ++y) {
         for (int32_t x = 0; x < mWidth; ++x) {
-            uint8_t yValue;
-            uint8_t uValue;
-            uint8_t vValue;
+            uint8_t yValue = 0u;
+            uint8_t uValue = 0u;
+            uint8_t vValue = 0u;
             getPixelValue(x, y, &yValue, &uValue, & vValue);
 
             uint8_t rValue;
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index d07bc99..d3e546a 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -15,6 +15,8 @@
 
 LOCAL_SHARED_LIBRARIES := \
 	libaudioflinger \
+	libaudiopolicy \
+	libcamera_metadata\
 	libcameraservice \
 	libmedialogservice \
 	libcutils \
@@ -32,6 +34,7 @@
     frameworks/av/media/libmediaplayerservice \
     frameworks/av/services/medialog \
     frameworks/av/services/audioflinger \
+    frameworks/av/services/audiopolicy \
     frameworks/av/services/camera/libcameraservice
 
 LOCAL_MODULE:= mediaserver
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index d5207d5..a347951 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -37,7 +37,7 @@
 
 using namespace android;
 
-int main(int argc, char** argv)
+int main(int argc __unused, char** argv)
 {
     signal(SIGPIPE, SIG_IGN);
     char value[PROPERTY_VALUE_MAX];
diff --git a/media/mtp/MtpProperty.cpp b/media/mtp/MtpProperty.cpp
index 375ed9a..c500901 100644
--- a/media/mtp/MtpProperty.cpp
+++ b/media/mtp/MtpProperty.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "MtpProperty"
 
 #include <inttypes.h>
+#include <cutils/compiler.h>
 #include "MtpDataPacket.h"
 #include "MtpDebug.h"
 #include "MtpProperty.h"
@@ -190,9 +191,9 @@
             if (deviceProp)
                 writeValue(packet, mCurrentValue);
     }
-    packet.putUInt32(mGroupCode);
     if (!deviceProp)
-        packet.putUInt8(mFormFlag);
+        packet.putUInt32(mGroupCode);
+    packet.putUInt8(mFormFlag);
     if (mFormFlag == kFormRange) {
             writeValue(packet, mMinimumValue);
             writeValue(packet, mMaximumValue);
@@ -518,8 +519,14 @@
 
 MtpPropertyValue* MtpProperty::readArrayValues(MtpDataPacket& packet, int& length) {
     length = packet.getUInt32();
-    if (length == 0)
+    // Fail if resulting array is over 2GB.  This is because the maximum array
+    // size may be less than SIZE_MAX on some platforms.
+    if ( CC_UNLIKELY(
+            length == 0 ||
+            length >= INT32_MAX / sizeof(MtpPropertyValue)) ) {
+        length = 0;
         return NULL;
+    }
     MtpPropertyValue* result = new MtpPropertyValue[length];
     for (int i = 0; i < length; i++)
         readValue(packet, result[i]);
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 155f645..157f2ce 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -94,6 +94,7 @@
     MTP_EVENT_OBJECT_REMOVED,
     MTP_EVENT_STORE_ADDED,
     MTP_EVENT_STORE_REMOVED,
+    MTP_EVENT_DEVICE_PROP_CHANGED,
 };
 
 MtpServer::MtpServer(int fd, MtpDatabase* database, bool ptp,
@@ -262,6 +263,11 @@
     sendEvent(MTP_EVENT_STORE_REMOVED, id);
 }
 
+void MtpServer::sendDevicePropertyChanged(MtpDeviceProperty property) {
+    ALOGV("sendDevicePropertyChanged %d\n", property);
+    sendEvent(MTP_EVENT_DEVICE_PROP_CHANGED, property);
+}
+
 void MtpServer::sendEvent(MtpEventCode code, uint32_t param1) {
     if (mSessionOpen) {
         mEvent.setEventCode(code);
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index dfa8258..b3a11e0 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -104,6 +104,7 @@
 
     void                sendObjectAdded(MtpObjectHandle handle);
     void                sendObjectRemoved(MtpObjectHandle handle);
+    void                sendDevicePropertyChanged(MtpDeviceProperty property);
 
 private:
     void                sendStoreAdded(MtpStorageID id);
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index b895027..8d0a705 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -13,18 +13,27 @@
 
 include $(CLEAR_VARS)
 
+LOCAL_SRC_FILES := \
+    ServiceUtilities.cpp
+
+# FIXME Move this library to frameworks/native
+LOCAL_MODULE := libserviceutility
+
+include $(BUILD_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+
 LOCAL_SRC_FILES:=               \
     AudioFlinger.cpp            \
     Threads.cpp                 \
     Tracks.cpp                  \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
-    AudioPolicyService.cpp      \
-    ServiceUtilities.cpp        \
 
 LOCAL_SRC_FILES += StateQueue.cpp
 
 LOCAL_C_INCLUDES := \
+    $(TOPDIR)frameworks/av/services/audiopolicy \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
@@ -46,12 +55,14 @@
 LOCAL_STATIC_LIBRARIES := \
     libscheduling_policy \
     libcpustats \
-    libmedia_helper
+    libmedia_helper \
+    libserviceutility
 
 LOCAL_MODULE:= libaudioflinger
 LOCAL_32_BIT_ONLY := true
 
 LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp
+LOCAL_SRC_FILES += FastThread.cpp FastThreadState.cpp
 
 LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
 
@@ -72,10 +83,21 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=               \
-	test-resample.cpp 			\
+    test-resample.cpp           \
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils)
+
+LOCAL_STATIC_LIBRARIES := \
+    libsndfile
 
 LOCAL_SHARED_LIBRARIES := \
     libaudioresampler \
+    libaudioutils \
+    libdl \
+    libcutils \
+    libutils \
+    liblog
 
 LOCAL_MODULE:= test-resample
 
@@ -88,7 +110,8 @@
 LOCAL_SRC_FILES:= \
     AudioResampler.cpp.arm \
     AudioResamplerCubic.cpp.arm \
-    AudioResamplerSinc.cpp.arm
+    AudioResamplerSinc.cpp.arm \
+    AudioResamplerDyn.cpp.arm
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index c0c34f7..755d480 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -104,6 +104,27 @@
 
 // ----------------------------------------------------------------------------
 
+const char *formatToString(audio_format_t format) {
+    switch(format) {
+    case AUDIO_FORMAT_PCM_SUB_8_BIT: return "pcm8";
+    case AUDIO_FORMAT_PCM_SUB_16_BIT: return "pcm16";
+    case AUDIO_FORMAT_PCM_SUB_32_BIT: return "pcm32";
+    case AUDIO_FORMAT_PCM_SUB_8_24_BIT: return "pcm8.24";
+    case AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED: return "pcm24";
+    case AUDIO_FORMAT_PCM_SUB_FLOAT: return "pcmfloat";
+    case AUDIO_FORMAT_MP3: return "mp3";
+    case AUDIO_FORMAT_AMR_NB: return "amr-nb";
+    case AUDIO_FORMAT_AMR_WB: return "amr-wb";
+    case AUDIO_FORMAT_AAC: return "aac";
+    case AUDIO_FORMAT_HE_AAC_V1: return "he-aac-v1";
+    case AUDIO_FORMAT_HE_AAC_V2: return "he-aac-v2";
+    case AUDIO_FORMAT_VORBIS: return "vorbis";
+    default:
+        break;
+    }
+    return "unknown";
+}
+
 static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
 {
     const hw_module_t *mod;
@@ -138,6 +159,7 @@
 AudioFlinger::AudioFlinger()
     : BnAudioFlinger(),
       mPrimaryHardwareDev(NULL),
+      mAudioHwDevs(NULL),
       mHardwareStatus(AUDIO_HW_IDLE),
       mMasterVolume(1.0f),
       mMasterMute(false),
@@ -152,7 +174,7 @@
     char value[PROPERTY_VALUE_MAX];
     bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
     if (doLog) {
-        mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters");
+        mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters", MemoryHeapBase::READ_ONLY);
     }
 #ifdef TEE_SINK
     (void) property_get("ro.debuggable", value, "0");
@@ -162,12 +184,16 @@
         (void) property_get("af.tee", value, "0");
         teeEnabled = atoi(value);
     }
-    if (teeEnabled & 1)
+    // FIXME symbolic constants here
+    if (teeEnabled & 1) {
         mTeeSinkInputEnabled = true;
-    if (teeEnabled & 2)
+    }
+    if (teeEnabled & 2) {
         mTeeSinkOutputEnabled = true;
-    if (teeEnabled & 4)
+    }
+    if (teeEnabled & 4) {
         mTeeSinkTrackEnabled = true;
+    }
 #endif
 }
 
@@ -210,6 +236,18 @@
         audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice());
         delete mAudioHwDevs.valueAt(i);
     }
+
+    // Tell media.log service about any old writers that still need to be unregistered
+    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
+    if (binder != 0) {
+        sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
+        for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+            sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
+            mUnregisteredWriters.pop();
+            mediaLogService->unregisterWriter(iMemory);
+        }
+    }
+
 }
 
 static const char * const audio_interfaces[] = {
@@ -249,7 +287,7 @@
     return NULL;
 }
 
-void AudioFlinger::dumpClients(int fd, const Vector<String16>& args)
+void AudioFlinger::dumpClients(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -271,17 +309,17 @@
     }
 
     result.append("Global session refs:\n");
-    result.append(" session pid count\n");
+    result.append("  session   pid count\n");
     for (size_t i = 0; i < mAudioSessionRefs.size(); i++) {
         AudioSessionRef *r = mAudioSessionRefs[i];
-        snprintf(buffer, SIZE, " %7d %3d %3d\n", r->mSessionid, r->mPid, r->mCnt);
+        snprintf(buffer, SIZE, "  %7d %5d %5d\n", r->mSessionid, r->mPid, r->mCnt);
         result.append(buffer);
     }
     write(fd, result.string(), result.size());
 }
 
 
-void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -296,7 +334,7 @@
     write(fd, result.string(), result.size());
 }
 
-void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args)
+void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -403,16 +441,44 @@
 
 sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name)
 {
+    // If there is no memory allocated for logs, return a dummy writer that does nothing
     if (mLogMemoryDealer == 0) {
         return new NBLog::Writer();
     }
-    sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
-    sp<NBLog::Writer> writer = new NBLog::Writer(size, shared);
     sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    if (binder != 0) {
-        interface_cast<IMediaLogService>(binder)->registerWriter(shared, size, name);
+    // Similarly if we can't contact the media.log service, also return a dummy writer
+    if (binder == 0) {
+        return new NBLog::Writer();
     }
-    return writer;
+    sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
+    sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
+    // If allocation fails, consult the vector of previously unregistered writers
+    // and garbage-collect one or more them until an allocation succeeds
+    if (shared == 0) {
+        Mutex::Autolock _l(mUnregisteredWritersLock);
+        for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+            {
+                // Pick the oldest stale writer to garbage-collect
+                sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory());
+                mUnregisteredWriters.removeAt(0);
+                mediaLogService->unregisterWriter(iMemory);
+                // Now the media.log remote reference to IMemory is gone.  When our last local
+                // reference to IMemory also drops to zero at end of this block,
+                // the IMemory destructor will deallocate the region from mLogMemoryDealer.
+            }
+            // Re-attempt the allocation
+            shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
+            if (shared != 0) {
+                goto success;
+            }
+        }
+        // Even after garbage-collecting all old writers, there is still not enough memory,
+        // so return a dummy writer
+        return new NBLog::Writer();
+    }
+success:
+    mediaLogService->registerWriter(shared, size, name);
+    return new NBLog::Writer(size, shared);
 }
 
 void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer)
@@ -424,13 +490,10 @@
     if (iMemory == 0) {
         return;
     }
-    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    if (binder != 0) {
-        interface_cast<IMediaLogService>(binder)->unregisterWriter(iMemory);
-        // Now the media.log remote reference to IMemory is gone.
-        // When our last local reference to IMemory also drops to zero,
-        // the IMemory destructor will deallocate the region from mMemoryDealer.
-    }
+    // Rather than removing the writer immediately, append it to a queue of old writers to
+    // be garbage-collected later.  This allows us to continue to view old logs for a while.
+    Mutex::Autolock _l(mUnregisteredWritersLock);
+    mUnregisteredWriters.push(writer);
 }
 
 // IAudioFlinger interface
@@ -441,13 +504,12 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        size_t frameCount,
+        size_t *frameCount,
         IAudioFlinger::track_flags_t *flags,
         const sp<IMemory>& sharedBuffer,
         audio_io_handle_t output,
         pid_t tid,
         int *sessionId,
-        String8& name,
         int clientUid,
         status_t *status)
 {
@@ -465,10 +527,31 @@
         goto Exit;
     }
 
+    // further sample rate checks are performed by createTrack_l() depending on the thread type
+    if (sampleRate == 0) {
+        ALOGE("createTrack() invalid sample rate %u", sampleRate);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
+    // further channel mask checks are performed by createTrack_l() depending on the thread type
+    if (!audio_is_output_channel(channelMask)) {
+        ALOGE("createTrack() invalid channel mask %#x", channelMask);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     // client is responsible for conversion of 8-bit PCM to 16-bit PCM,
     // and we don't yet support 8.24 or 32-bit PCM
-    if (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGE("createTrack() invalid format %d", format);
+    if (!audio_is_valid_format(format) ||
+            (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT)) {
+        ALOGE("createTrack() invalid format %#x", format);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
+    if (sharedBuffer != 0 && sharedBuffer->pointer() == NULL) {
+        ALOGE("createTrack() sharedBuffer is non-0 but has NULL pointer()");
         lStatus = BAD_VALUE;
         goto Exit;
     }
@@ -476,7 +559,6 @@
     {
         Mutex::Autolock _l(mLock);
         PlaybackThread *thread = checkPlaybackThread_l(output);
-        PlaybackThread *effectThread = NULL;
         if (thread == NULL) {
             ALOGE("no playback thread found for output handle %d", output);
             lStatus = BAD_VALUE;
@@ -484,24 +566,23 @@
         }
 
         pid_t pid = IPCThreadState::self()->getCallingPid();
-
         client = registerPid_l(pid);
 
-        ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId);
-        if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
+        PlaybackThread *effectThread = NULL;
+        if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
+            lSessionId = *sessionId;
             // check if an effect chain with the same session ID is present on another
             // output thread and move it here.
             for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
                 sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
                 if (mPlaybackThreads.keyAt(i) != output) {
-                    uint32_t sessions = t->hasAudioSession(*sessionId);
+                    uint32_t sessions = t->hasAudioSession(lSessionId);
                     if (sessions & PlaybackThread::EFFECT_SESSION) {
                         effectThread = t.get();
                         break;
                     }
                 }
             }
-            lSessionId = *sessionId;
         } else {
             // if no audio session id is provided, create one here
             lSessionId = nextUniqueId();
@@ -519,6 +600,7 @@
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
         if (lStatus == NO_ERROR && effectThread != NULL) {
+            // no risk of deadlock because AudioFlinger::mLock is held
             Mutex::Autolock _dl(thread->mLock);
             Mutex::Autolock _sl(effectThread->mLock);
             moveEffectChain_l(lSessionId, effectThread, thread, true);
@@ -538,23 +620,22 @@
                 }
             }
         }
-    }
-    if (lStatus == NO_ERROR) {
-        // s for server's pid, n for normal mixer name, f for fast index
-        name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0,
-                track->fastIndex());
-        trackHandle = new TrackHandle(track);
-    } else {
-        // remove local strong reference to Client before deleting the Track so that the Client
-        // destructor is called by the TrackBase destructor with mLock held
-        client.clear();
-        track.clear();
+
     }
 
-Exit:
-    if (status != NULL) {
-        *status = lStatus;
+    if (lStatus != NO_ERROR) {
+        // remove local strong reference to Client before deleting the Track so that the
+        // Client destructor is called by the TrackBase destructor with mLock held
+        client.clear();
+        track.clear();
+        goto Exit;
     }
+
+    // return handle to client
+    trackHandle = new TrackHandle(track);
+
+Exit:
+    *status = lStatus;
     return trackHandle;
 }
 
@@ -796,7 +877,7 @@
 
     AutoMutex lock(mLock);
     PlaybackThread *thread = NULL;
-    if (output) {
+    if (output != AUDIO_IO_HANDLE_NONE) {
         thread = checkPlaybackThread_l(output);
         if (thread == NULL) {
             return BAD_VALUE;
@@ -845,7 +926,7 @@
 
     AutoMutex lock(mLock);
     float volume;
-    if (output) {
+    if (output != AUDIO_IO_HANDLE_NONE) {
         PlaybackThread *thread = checkPlaybackThread_l(output);
         if (thread == NULL) {
             return 0.0f;
@@ -878,8 +959,8 @@
         return PERMISSION_DENIED;
     }
 
-    // ioHandle == 0 means the parameters are global to the audio hardware interface
-    if (ioHandle == 0) {
+    // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
         Mutex::Autolock _l(mLock);
         status_t final_result = NO_ERROR;
         {
@@ -961,7 +1042,7 @@
 
     Mutex::Autolock _l(mLock);
 
-    if (ioHandle == 0) {
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
         String8 out_s8;
 
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
@@ -1212,7 +1293,7 @@
 {
 }
 
-void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
+void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who __unused)
 {
     sp<NotificationClient> keep(this);
     mAudioFlinger->removeNotificationClient(mPid);
@@ -1230,7 +1311,7 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        size_t frameCount,
+        size_t *frameCount,
         IAudioFlinger::track_flags_t *flags,
         pid_t tid,
         int *sessionId,
@@ -1240,8 +1321,6 @@
     sp<RecordHandle> recordHandle;
     sp<Client> client;
     status_t lStatus;
-    RecordThread *thread;
-    size_t inFrameCount;
     int lSessionId;
 
     // check calling permissions
@@ -1251,16 +1330,31 @@
         goto Exit;
     }
 
-    if (format != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGE("openRecord() invalid format %d", format);
+    // further sample rate checks are performed by createRecordTrack_l()
+    if (sampleRate == 0) {
+        ALOGE("openRecord() invalid sample rate %u", sampleRate);
         lStatus = BAD_VALUE;
         goto Exit;
     }
 
-    // add client to list
-    { // scope for mLock
+    // we don't yet support anything other than 16-bit PCM
+    if (!(audio_is_valid_format(format) &&
+            audio_is_linear_pcm(format) && format == AUDIO_FORMAT_PCM_16_BIT)) {
+        ALOGE("openRecord() invalid format %#x", format);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
+    // further channel mask checks are performed by createRecordTrack_l()
+    if (!audio_is_input_channel(channelMask)) {
+        ALOGE("openRecord() invalid channel mask %#x", channelMask);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
+    {
         Mutex::Autolock _l(mLock);
-        thread = checkRecordThread_l(input);
+        RecordThread *thread = checkRecordThread_l(input);
         if (thread == NULL) {
             ALOGE("openRecord() checkRecordThread_l failed");
             lStatus = BAD_VALUE;
@@ -1277,17 +1371,17 @@
         pid_t pid = IPCThreadState::self()->getCallingPid();
         client = registerPid_l(pid);
 
-        // If no audio session id is provided, create one here
-        if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
+        if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
             lSessionId = *sessionId;
         } else {
+            // if no audio session id is provided, create one here
             lSessionId = nextUniqueId();
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
         }
-        // create new record track.
-        // The record track uses one track in mHardwareMixerThread by convention.
+        ALOGV("openRecord() lSessionId: %d", lSessionId);
+
         // TODO: the uid should be passed in as a parameter to openRecord
         recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                   frameCount, lSessionId,
@@ -1295,6 +1389,7 @@
                                                   flags, tid, &lStatus);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
     }
+
     if (lStatus != NO_ERROR) {
         // remove local strong reference to Client before deleting the RecordTrack so that the
         // Client destructor is called by the TrackBase destructor with mLock held
@@ -1303,14 +1398,11 @@
         goto Exit;
     }
 
-    // return to handle to client
+    // return handle to client
     recordHandle = new RecordHandle(recordTrack);
-    lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return recordHandle;
 }
 
@@ -1451,18 +1543,15 @@
                                            audio_output_flags_t flags,
                                            const audio_offload_info_t *offloadInfo)
 {
-    PlaybackThread *thread = NULL;
     struct audio_config config;
+    memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
     config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
     config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
-    if (offloadInfo) {
+    if (offloadInfo != NULL) {
         config.offload_info = *offloadInfo;
     }
 
-    audio_stream_out_t *outStream = NULL;
-    AudioHwDevice *outHwDev;
-
     ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (pDevices != NULL) ? *pDevices : 0,
@@ -1471,23 +1560,25 @@
               config.channel_mask,
               flags);
     ALOGV("openOutput(), offloadInfo %p version 0x%04x",
-          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version );
+          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version);
 
-    if (pDevices == NULL || *pDevices == 0) {
-        return 0;
+    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     Mutex::Autolock _l(mLock);
 
-    outHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (outHwDev == NULL)
-        return 0;
+    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (outHwDev == NULL) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
 
     audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
     audio_io_handle_t id = nextUniqueId();
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
+    audio_stream_out_t *outStream = NULL;
     status_t status = hwDevHal->open_output_stream(hwDevHal,
                                           id,
                                           *pDevices,
@@ -1507,6 +1598,7 @@
     if (status == NO_ERROR && outStream != NULL) {
         AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
 
+        PlaybackThread *thread;
         if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
             thread = new OffloadThread(this, output, id, *pDevices);
             ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
@@ -1550,7 +1642,7 @@
         return id;
     }
 
-    return 0;
+    return AUDIO_IO_HANDLE_NONE;
 }
 
 audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1,
@@ -1563,7 +1655,7 @@
     if (thread1 == NULL || thread2 == NULL) {
         ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1,
                 output2);
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     audio_io_handle_t id = nextUniqueId();
@@ -1674,35 +1766,34 @@
                                           audio_format_t *pFormat,
                                           audio_channel_mask_t *pChannelMask)
 {
-    status_t status;
-    RecordThread *thread = NULL;
     struct audio_config config;
+    memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
     config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
     config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
 
     uint32_t reqSamplingRate = config.sample_rate;
     audio_format_t reqFormat = config.format;
-    audio_channel_mask_t reqChannels = config.channel_mask;
-    audio_stream_in_t *inStream = NULL;
-    AudioHwDevice *inHwDev;
+    audio_channel_mask_t reqChannelMask = config.channel_mask;
 
-    if (pDevices == NULL || *pDevices == 0) {
+    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
         return 0;
     }
 
     Mutex::Autolock _l(mLock);
 
-    inHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (inHwDev == NULL)
+    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (inHwDev == NULL) {
         return 0;
+    }
 
     audio_hw_device_t *inHwHal = inHwDev->hwDevice();
     audio_io_handle_t id = nextUniqueId();
 
-    status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
+    audio_stream_in_t *inStream = NULL;
+    status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
                                         &inStream);
-    ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, "
+    ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, "
             "status %d",
             inStream,
             config.sample_rate,
@@ -1716,10 +1807,12 @@
     if (status == BAD_VALUE &&
         reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
         (config.sample_rate <= 2 * reqSamplingRate) &&
-        (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
+        (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannelMask) <= FCC_2)) {
+        // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
         status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
+        // FIXME log this new status; HAL should not propose any further changes
     }
 
     if (status == NO_ERROR && inStream != NULL) {
@@ -1737,13 +1830,13 @@
                                         popcount(inStream->common.get_channels(&inStream->common)));
         if (!mTeeSinkInputEnabled) {
             kind = TEE_SINK_NO;
-        } else if (format == Format_Invalid) {
+        } else if (!Format_isValid(format)) {
             kind = TEE_SINK_NO;
         } else if (mRecordTeeSink == 0) {
             kind = TEE_SINK_NEW;
         } else if (mRecordTeeSink->getStrongCount() != 1) {
             kind = TEE_SINK_NO;
-        } else if (format == mRecordTeeSink->format()) {
+        } else if (Format_isEqual(format, mRecordTeeSink->format())) {
             kind = TEE_SINK_OLD;
         } else {
             kind = TEE_SINK_NEW;
@@ -1778,10 +1871,8 @@
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
-        thread = new RecordThread(this,
+        RecordThread *thread = new RecordThread(this,
                                   input,
-                                  reqSamplingRate,
-                                  reqChannels,
                                   id,
                                   primaryOutputDevice_l(),
                                   *pDevices
@@ -1798,7 +1889,7 @@
             *pFormat = config.format;
         }
         if (pChannelMask != NULL) {
-            *pChannelMask = reqChannels;
+            *pChannelMask = reqChannelMask;
         }
 
         // notify client processes of the new input creation
@@ -1843,10 +1934,10 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output)
+status_t AudioFlinger::invalidateStream(audio_stream_type_t stream)
 {
     Mutex::Autolock _l(mLock);
-    ALOGV("setStreamOutput() stream %d to output %d", stream, output);
+    ALOGV("invalidateStream() stream %d", stream);
 
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
         PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
@@ -1862,18 +1953,21 @@
     return nextUniqueId();
 }
 
-void AudioFlinger::acquireAudioSessionId(int audioSession)
+void AudioFlinger::acquireAudioSessionId(int audioSession, pid_t pid)
 {
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
-    ALOGV("acquiring %d from %d", audioSession, caller);
+    ALOGV("acquiring %d from %d, for %d", audioSession, caller, pid);
+    if (pid != -1 && (caller == getpid_cached)) {
+        caller = pid;
+    }
 
     // Ignore requests received from processes not known as notification client. The request
     // is likely proxied by mediaserver (e.g CameraService) and releaseAudioSessionId() can be
     // called from a different pid leaving a stale session reference.  Also we don't know how
     // to clear this reference if the client process dies.
     if (mNotificationClients.indexOfKey(caller) < 0) {
-        ALOGV("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession);
+        ALOGW("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession);
         return;
     }
 
@@ -1890,11 +1984,14 @@
     ALOGV(" added new entry for %d", audioSession);
 }
 
-void AudioFlinger::releaseAudioSessionId(int audioSession)
+void AudioFlinger::releaseAudioSessionId(int audioSession, pid_t pid)
 {
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
-    ALOGV("releasing %d from %d", audioSession, caller);
+    ALOGV("releasing %d from %d for %d", audioSession, caller, pid);
+    if (pid != -1 && (caller == getpid_cached)) {
+        caller = pid;
+    }
     size_t num = mAudioSessionRefs.size();
     for (size_t i = 0; i< num; i++) {
         AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
@@ -1956,7 +2053,7 @@
             }
         }
         if (!found) {
-            Mutex::Autolock _l (t->mLock);
+            Mutex::Autolock _l(t->mLock);
             // remove all effects from the chain
             while (ec->mEffects.size()) {
                 sp<EffectModule> effect = ec->mEffects[0];
@@ -1993,7 +2090,7 @@
 
 uint32_t AudioFlinger::nextUniqueId()
 {
-    return android_atomic_inc(&mNextUniqueId);
+    return (uint32_t) android_atomic_inc(&mNextUniqueId);
 }
 
 AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const
@@ -2023,7 +2120,7 @@
                                     int triggerSession,
                                     int listenerSession,
                                     sync_event_callback_t callBack,
-                                    void *cookie)
+                                    wp<RefBase> cookie)
 {
     Mutex::Autolock _l(mLock);
 
@@ -2185,7 +2282,7 @@
 
         // return effect descriptor
         *pDesc = desc;
-        if (io == 0 && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+        if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
             // if the output returned by getOutputForEffect() is removed before we lock the
             // mutex below, the call to checkPlaybackThread_l(io) below will detect it
             // and we will exit safely
@@ -2200,7 +2297,7 @@
         // If output is 0 here, sessionId is neither SESSION_OUTPUT_STAGE nor SESSION_OUTPUT_MIX
         // because of code checking output when entering the function.
         // Note: io is never 0 when creating an effect on an input
-        if (io == 0) {
+        if (io == AUDIO_IO_HANDLE_NONE) {
             if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
                 // output must be specified by AudioPolicyManager when using session
                 // AUDIO_SESSION_OUTPUT_STAGE
@@ -2225,7 +2322,7 @@
             // If no output thread contains the requested session ID, default to
             // first output. The effect chain will be moved to the correct output
             // thread when a track with the same session ID is created
-            if (io == 0 && mPlaybackThreads.size()) {
+            if (io == AUDIO_IO_HANDLE_NONE && mPlaybackThreads.size() > 0) {
                 io = mPlaybackThreads.keyAt(0);
             }
             ALOGV("createEffect() got io %d for effect %s", io, desc.name);
@@ -2251,9 +2348,7 @@
     }
 
 Exit:
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return handle;
 }
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 7320144..ec32edd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -60,8 +60,8 @@
 
 namespace android {
 
-class audio_track_cblk_t;
-class effect_param_cblk_t;
+struct audio_track_cblk_t;
+struct effect_param_cblk_t;
 class AudioMixer;
 class AudioBuffer;
 class AudioResampler;
@@ -102,26 +102,25 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 IAudioFlinger::track_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
                                 pid_t tid,
                                 int *sessionId,
-                                String8& name,
                                 int clientUid,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
     virtual sp<IAudioRecord> openRecord(
                                 audio_io_handle_t input,
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int *sessionId,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
     virtual     uint32_t    sampleRate(audio_io_handle_t output) const;
     virtual     int         channelCount(audio_io_handle_t output) const;
@@ -182,7 +181,7 @@
 
     virtual status_t closeInput(audio_io_handle_t input);
 
-    virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output);
+    virtual status_t invalidateStream(audio_stream_type_t stream);
 
     virtual status_t setVoiceVolume(float volume);
 
@@ -193,9 +192,9 @@
 
     virtual int newAudioSessionId();
 
-    virtual void acquireAudioSessionId(int audioSession);
+    virtual void acquireAudioSessionId(int audioSession, pid_t pid);
 
-    virtual void releaseAudioSessionId(int audioSession);
+    virtual void releaseAudioSessionId(int audioSession, pid_t pid);
 
     virtual status_t queryNumberEffects(uint32_t *numEffects) const;
 
@@ -210,7 +209,7 @@
                         int32_t priority,
                         audio_io_handle_t io,
                         int sessionId,
-                        status_t *status,
+                        status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
 
@@ -235,8 +234,12 @@
     sp<NBLog::Writer>   newWriter_l(size_t size, const char *name);
     void                unregisterWriter(const sp<NBLog::Writer>& writer);
 private:
-    static const size_t kLogMemorySize = 10 * 1024;
+    static const size_t kLogMemorySize = 40 * 1024;
     sp<MemoryDealer>    mLogMemoryDealer;   // == 0 when NBLog is disabled
+    // When a log writer is unregistered, it is done lazily so that media.log can continue to see it
+    // for as long as possible.  The memory is only freed when it is needed for another log writer.
+    Vector< sp<NBLog::Writer> > mUnregisteredWriters;
+    Mutex               mUnregisteredWritersLock;
 public:
 
     class SyncEvent;
@@ -249,7 +252,7 @@
                   int triggerSession,
                   int listenerSession,
                   sync_event_callback_t callBack,
-                  void *cookie)
+                  wp<RefBase> cookie)
         : mType(type), mTriggerSession(triggerSession), mListenerSession(listenerSession),
           mCallback(callBack), mCookie(cookie)
         {}
@@ -262,14 +265,14 @@
         AudioSystem::sync_event_t type() const { return mType; }
         int triggerSession() const { return mTriggerSession; }
         int listenerSession() const { return mListenerSession; }
-        void *cookie() const { return mCookie; }
+        wp<RefBase> cookie() const { return mCookie; }
 
     private:
           const AudioSystem::sync_event_t mType;
           const int mTriggerSession;
           const int mListenerSession;
           sync_event_callback_t mCallback;
-          void * const mCookie;
+          const wp<RefBase> mCookie;
           mutable Mutex mLock;
     };
 
@@ -277,7 +280,7 @@
                                         int triggerSession,
                                         int listenerSession,
                                         sync_event_callback_t callBack,
-                                        void *cookie);
+                                        wp<RefBase> cookie);
 
 private:
     class AudioHwDevice;    // fwd declaration for findSuitableHwDev_l
@@ -451,7 +454,14 @@
                                 { return mStreamTypes[stream].volume; }
               void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2);
 
-              // allocate an audio_io_handle_t, session ID, or effect ID
+              // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t.
+              // They all share the same ID space, but the namespaces are actually independent
+              // because there are separate KeyedVectors for each kind of ID.
+              // The return value is uint32_t, but is cast to signed for some IDs.
+              // FIXME This API does not handle rollover to zero (for unsigned IDs),
+              //       or from positive to negative (for signed IDs).
+              //       Thus it may fail by returning an ID of the wrong sign,
+              //       or by returning a non-unique ID.
               uint32_t nextUniqueId();
 
               status_t moveEffectChain_l(int sessionId,
@@ -499,7 +509,7 @@
     private:
         const char * const mModuleName;
         audio_hw_device_t * const mHwDevice;
-        Flags mFlags;
+        const Flags mFlags;
     };
 
     // AudioStreamOut and AudioStreamIn are immutable, so their fields are const.
@@ -509,7 +519,7 @@
     struct AudioStreamOut {
         AudioHwDevice* const audioHwDev;
         audio_stream_out_t* const stream;
-        audio_output_flags_t flags;
+        const audio_output_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
@@ -587,7 +597,11 @@
                 DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> >    mRecordThreads;
 
                 DefaultKeyedVector< pid_t, sp<NotificationClient> >    mNotificationClients;
+
                 volatile int32_t                    mNextUniqueId;  // updated by android_atomic_inc
+                // nextUniqueId() returns uint32_t, but this is declared int32_t
+                // because the atomic operations require an int32_t
+
                 audio_mode_t                        mMode;
                 bool                                mBtNrecIsOff;
 
@@ -634,7 +648,7 @@
     // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
     static const size_t kTeeSinkInputFramesDefault = 0x200000;
     static const size_t kTeeSinkOutputFramesDefault = 0x200000;
-    static const size_t kTeeSinkTrackFramesDefault = 0x1000;
+    static const size_t kTeeSinkTrackFramesDefault = 0x200000;
 #endif
 
     // This method reads from a variable without mLock, but the variable is updated under mLock.  So
@@ -651,6 +665,8 @@
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
 
+const char *formatToString(audio_format_t format);
+
 // ----------------------------------------------------------------------------
 
 }; // namespace android
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index f92421e..2d67efb 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -58,7 +58,7 @@
 status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
         int64_t pts) {
     //ALOGV("DownmixerBufferProvider::getNextBuffer()");
-    if (this->mTrackBufferProvider != NULL) {
+    if (mTrackBufferProvider != NULL) {
         status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
         if (res == OK) {
             mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount;
@@ -81,7 +81,7 @@
 
 void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
     //ALOGV("DownmixerBufferProvider::releaseBuffer()");
-    if (this->mTrackBufferProvider != NULL) {
+    if (mTrackBufferProvider != NULL) {
         mTrackBufferProvider->releaseBuffer(pBuffer);
     } else {
         ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider");
@@ -90,9 +90,9 @@
 
 
 // ----------------------------------------------------------------------------
-bool AudioMixer::isMultichannelCapable = false;
+bool AudioMixer::sIsMultichannelCapable = false;
 
-effect_descriptor_t AudioMixer::dwnmFxDesc;
+effect_descriptor_t AudioMixer::sDwnmFxDesc;
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
 // The value of 1 << x is undefined in C when x >= 32.
@@ -113,8 +113,6 @@
     // AudioMixer is not yet capable of multi-channel output beyond stereo
     ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS);
 
-    LocalClock lc;
-
     pthread_once(&sOnceControl, &sInitRoutine);
 
     mState.enabledTracks= 0;
@@ -136,27 +134,6 @@
         t++;
     }
 
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &dwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, dwnmFxDesc.name);
-            if (memcmp(&dwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        dwnmFxDesc.name, dwnmFxDesc.implementor);
-                isMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGE_IF(!isMultichannelCapable, "unable to find downmix effect");
 }
 
 AudioMixer::~AudioMixer()
@@ -216,6 +193,7 @@
         t->mainBuffer = NULL;
         t->auxBuffer = NULL;
         t->downmixerBufferProvider = NULL;
+        t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
 
         status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask);
         if (status == OK) {
@@ -229,7 +207,7 @@
 
 void AudioMixer::invalidateState(uint32_t mask)
 {
-    if (mask) {
+    if (mask != 0) {
         mState.needsChanged |= mask;
         mState.hook = process__validate;
     }
@@ -252,7 +230,7 @@
     return status;
 }
 
-void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName) {
+void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) {
     ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName);
 
     if (pTrack->downmixerBufferProvider != NULL) {
@@ -276,13 +254,13 @@
     DownmixerBufferProvider* pDbp = new DownmixerBufferProvider();
     int32_t status;
 
-    if (!isMultichannelCapable) {
+    if (!sIsMultichannelCapable) {
         ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content",
                 trackName);
         goto noDownmixForActiveTrack;
     }
 
-    if (EffectCreate(&dwnmFxDesc.uuid,
+    if (EffectCreate(&sDwnmFxDesc.uuid,
             pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/,
             &pDbp->mDownmixHandle/*pHandle*/) != 0) {
         ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName);
@@ -463,8 +441,15 @@
         //         for a specific track? or per mixer?
         /* case DOWNMIX_TYPE:
             break          */
+        case MIXER_FORMAT: {
+            audio_format_t format = static_cast<audio_format_t>(valueInt);
+            if (track.mMixerFormat != format) {
+                track.mMixerFormat = format;
+                ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+            }
+            } break;
         default:
-            LOG_FATAL("bad param");
+            LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
         }
         break;
 
@@ -489,7 +474,7 @@
             invalidateState(1 << name);
             break;
         default:
-            LOG_FATAL("bad param");
+            LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
         }
         break;
 
@@ -537,12 +522,12 @@
             }
             break;
         default:
-            LOG_FATAL("bad param");
+            LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
         }
         break;
 
     default:
-        LOG_FATAL("bad target");
+        LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
     }
 }
 
@@ -560,14 +545,14 @@
                 // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
                 if (!((value == 44100 && devSampleRate == 48000) ||
                       (value == 48000 && devSampleRate == 44100))) {
-                    quality = AudioResampler::LOW_QUALITY;
+                    quality = AudioResampler::DYN_LOW_QUALITY;
                 } else {
                     quality = AudioResampler::DEFAULT_QUALITY;
                 }
                 resampler = AudioResampler::create(
                         format,
                         // the resampler sees the number of channels after the downmixer, if any
-                        downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount,
+                        (int) (downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount),
                         devSampleRate, quality);
                 resampler->setLocalTimeFreq(sLocalTimeFreq);
             }
@@ -668,27 +653,29 @@
         countActiveTracks++;
         track_t& t = state->tracks[i];
         uint32_t n = 0;
+        // FIXME can overflow (mask is only 3 bits)
         n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
-        n |= NEEDS_FORMAT_16;
-        n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED;
+        if (t.doesResample()) {
+            n |= NEEDS_RESAMPLE;
+        }
         if (t.auxLevel != 0 && t.auxBuffer != NULL) {
-            n |= NEEDS_AUX_ENABLED;
+            n |= NEEDS_AUX;
         }
 
         if (t.volumeInc[0]|t.volumeInc[1]) {
             volumeRamp = true;
         } else if (!t.doesResample() && t.volumeRL == 0) {
-            n |= NEEDS_MUTE_ENABLED;
+            n |= NEEDS_MUTE;
         }
         t.needs = n;
 
-        if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) {
+        if (n & NEEDS_MUTE) {
             t.hook = track__nop;
         } else {
-            if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
+            if (n & NEEDS_AUX) {
                 all16BitsStereoNoResample = false;
             }
-            if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+            if (n & NEEDS_RESAMPLE) {
                 all16BitsStereoNoResample = false;
                 resampling = true;
                 t.hook = track__genericResample;
@@ -710,7 +697,7 @@
 
     // select the processing hooks
     state->hook = process__nop;
-    if (countActiveTracks) {
+    if (countActiveTracks > 0) {
         if (resampling) {
             if (!state->outputTemp) {
                 state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -746,16 +733,15 @@
 
     // Now that the volume ramp has been done, set optimal state and
     // track hooks for subsequent mixer process
-    if (countActiveTracks) {
+    if (countActiveTracks > 0) {
         bool allMuted = true;
         uint32_t en = state->enabledTracks;
         while (en) {
             const int i = 31 - __builtin_clz(en);
             en &= ~(1<<i);
             track_t& t = state->tracks[i];
-            if (!t.doesResample() && t.volumeRL == 0)
-            {
-                t.needs |= NEEDS_MUTE_ENABLED;
+            if (!t.doesResample() && t.volumeRL == 0) {
+                t.needs |= NEEDS_MUTE;
                 t.hook = track__nop;
             } else {
                 allMuted = false;
@@ -806,8 +792,8 @@
     }
 }
 
-void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp,
-        int32_t* aux)
+void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused,
+        size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
 {
 }
 
@@ -883,8 +869,8 @@
     }
 }
 
-void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
-        int32_t* aux)
+void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount,
+        int32_t* temp __unused, int32_t* aux)
 {
     const int16_t *in = static_cast<const int16_t *>(t->in);
 
@@ -974,8 +960,8 @@
     t->in = in;
 }
 
-void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
-        int32_t* aux)
+void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount,
+        int32_t* temp __unused, int32_t* aux)
 {
     const int16_t *in = static_cast<int16_t const *>(t->in);
 
@@ -1065,7 +1051,7 @@
 void AudioMixer::process__nop(state_t* state, int64_t pts)
 {
     uint32_t e0 = state->enabledTracks;
-    size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS;
+    size_t sampleCount = state->frameCount * MAX_NUM_CHANNELS;
     while (e0) {
         // process by group of tracks with same output buffer to
         // avoid multiple memset() on same buffer
@@ -1084,7 +1070,8 @@
             }
             e0 &= ~(e1);
 
-            memset(t1.mainBuffer, 0, bufSize);
+            memset(t1.mainBuffer, 0, sampleCount
+                    * audio_bytes_per_sample(t1.mMixerFormat));
         }
 
         while (e1) {
@@ -1154,7 +1141,7 @@
                 track_t& t = state->tracks[i];
                 size_t outFrames = BLOCKSIZE;
                 int32_t *aux = NULL;
-                if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
+                if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                     aux = t.auxBuffer + numFrames;
                 }
                 while (outFrames) {
@@ -1166,7 +1153,7 @@
                         break;
                     }
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
-                    if (inFrames) {
+                    if (inFrames > 0) {
                         t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
                                 state->resampleTemp, aux);
                         t.frameCount -= inFrames;
@@ -1192,8 +1179,18 @@
                     }
                 }
             }
-            ditherAndClamp(out, outTemp, BLOCKSIZE);
-            out += BLOCKSIZE;
+            switch (t1.mMixerFormat) {
+            case AUDIO_FORMAT_PCM_FLOAT:
+                memcpy_to_float_from_q4_27(reinterpret_cast<float *>(out), outTemp, BLOCKSIZE * 2);
+                out += BLOCKSIZE * 2; // output is 2 floats/frame.
+                break;
+            case AUDIO_FORMAT_PCM_16_BIT:
+                ditherAndClamp(out, outTemp, BLOCKSIZE);
+                out += BLOCKSIZE; // output is 1 int32_t (2 int16_t samples)/frame
+                break;
+            default:
+                LOG_ALWAYS_FATAL("bad mixer format: %d", t1.mMixerFormat);
+            }
             numFrames += BLOCKSIZE;
         } while (numFrames < state->frameCount);
     }
@@ -1242,14 +1239,14 @@
             e1 &= ~(1<<i);
             track_t& t = state->tracks[i];
             int32_t *aux = NULL;
-            if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
+            if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
                 aux = t.auxBuffer;
             }
 
             // this is a little goofy, on the resampling case we don't
             // acquire/release the buffers because it's done by
             // the resampler.
-            if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+            if (t.needs & NEEDS_RESAMPLE) {
                 t.resampler->setPTS(pts);
                 t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
@@ -1275,7 +1272,16 @@
                 }
             }
         }
-        ditherAndClamp(out, outTemp, numFrames);
+        switch (t1.mMixerFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            memcpy_to_float_from_q4_27(reinterpret_cast<float*>(out), outTemp, numFrames*2);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            ditherAndClamp(out, outTemp, numFrames);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixer format: %d", t1.mMixerFormat);
+        }
     }
 }
 
@@ -1316,27 +1322,46 @@
         }
         size_t outFrames = b.frameCount;
 
-        if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
-            // volume is boosted, so we might need to clamp even though
-            // we process only one track.
+        switch (t.mMixerFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT: {
+            float *fout = reinterpret_cast<float*>(out);
             do {
                 uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
                 in += 2;
-                int32_t l = mulRL(1, rl, vrl) >> 12;
-                int32_t r = mulRL(0, rl, vrl) >> 12;
-                // clamping...
-                l = clamp16(l);
-                r = clamp16(r);
-                *out++ = (r<<16) | (l & 0xFFFF);
+                int32_t l = mulRL(1, rl, vrl);
+                int32_t r = mulRL(0, rl, vrl);
+                *fout++ = float_from_q4_27(l);
+                *fout++ = float_from_q4_27(r);
+                // Note: In case of later int16_t sink output,
+                // conversion and clamping is done by memcpy_to_i16_from_float().
             } while (--outFrames);
-        } else {
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                in += 2;
-                int32_t l = mulRL(1, rl, vrl) >> 12;
-                int32_t r = mulRL(0, rl, vrl) >> 12;
-                *out++ = (r<<16) | (l & 0xFFFF);
-            } while (--outFrames);
+            } break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
+                // volume is boosted, so we might need to clamp even though
+                // we process only one track.
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    // clamping...
+                    l = clamp16(l);
+                    r = clamp16(r);
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            } else {
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            }
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat);
         }
         numFrames -= b.frameCount;
         t.bufferProvider->releaseBuffer(&b);
@@ -1449,8 +1474,9 @@
 int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
                                        int outputFrameIndex)
 {
-    if (AudioBufferProvider::kInvalidPTS == basePTS)
+    if (AudioBufferProvider::kInvalidPTS == basePTS) {
         return AudioBufferProvider::kInvalidPTS;
+    }
 
     return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate);
 }
@@ -1462,6 +1488,28 @@
 {
     LocalClock lc;
     sLocalTimeFreq = lc.getLocalFreq();
+
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
 }
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 43aeb86..e5e120c 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -77,6 +77,7 @@
         MAIN_BUFFER     = 0x4002,
         AUX_BUFFER      = 0x4003,
         DOWNMIX_TYPE    = 0X4004,
+        MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
         // for target RESAMPLE
         SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
                                   // parameter 'value' is the new sample rate in Hz.
@@ -120,27 +121,19 @@
 private:
 
     enum {
+        // FIXME this representation permits up to 8 channels
         NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
-        NEEDS_FORMAT__MASK          = 0x000000F0,
-        NEEDS_MUTE__MASK            = 0x00000100,
-        NEEDS_RESAMPLE__MASK        = 0x00001000,
-        NEEDS_AUX__MASK             = 0x00010000,
     };
 
     enum {
-        NEEDS_CHANNEL_1             = 0x00000000,
-        NEEDS_CHANNEL_2             = 0x00000001,
+        NEEDS_CHANNEL_1             = 0x00000000,   // mono
+        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
 
-        NEEDS_FORMAT_16             = 0x00000010,
+        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
 
-        NEEDS_MUTE_DISABLED         = 0x00000000,
-        NEEDS_MUTE_ENABLED          = 0x00000100,
-
-        NEEDS_RESAMPLE_DISABLED     = 0x00000000,
-        NEEDS_RESAMPLE_ENABLED      = 0x00001000,
-
-        NEEDS_AUX_DISABLED     = 0x00000000,
-        NEEDS_AUX_ENABLED      = 0x00010000,
+        NEEDS_MUTE                  = 0x00000100,
+        NEEDS_RESAMPLE              = 0x00001000,
+        NEEDS_AUX                   = 0x00010000,
     };
 
     struct state_t;
@@ -201,7 +194,9 @@
 
         int32_t     sessionId;
 
-        int32_t     padding[2];
+        audio_format_t mMixerFormat; // at this time: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+
+        int32_t     padding[1];
 
         // 16-byte boundary
 
@@ -224,7 +219,7 @@
         NBLog::Writer*  mLog;
         int32_t         reserved[1];
         // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS
-        track_t         tracks[MAX_NUM_TRACKS]; __attribute__((aligned(32)));
+        track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
     // AudioBufferProvider that wraps a track AudioBufferProvider by a call to a downmix effect
@@ -256,9 +251,9 @@
     state_t         mState __attribute__((aligned(32)));
 
     // effect descriptor for the downmixer used by the mixer
-    static effect_descriptor_t dwnmFxDesc;
+    static effect_descriptor_t sDwnmFxDesc;
     // indicates whether a downmix effect has been found and is usable by this mixer
-    static bool                isMultichannelCapable;
+    static bool                sIsMultichannelCapable;
 
     // Call after changing either the enabled status of a track, or parameters of an enabled track.
     // OK to call more often than that, but unnecessary.
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index e5cceb1..562c4ea 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -25,6 +25,7 @@
 #include "AudioResampler.h"
 #include "AudioResamplerSinc.h"
 #include "AudioResamplerCubic.h"
+#include "AudioResamplerDyn.h"
 
 #ifdef __arm__
 #include <machine/cpu-features.h>
@@ -77,6 +78,9 @@
     int mX0R;
 };
 
+/*static*/
+const double AudioResampler::kPhaseMultiplier = 1L << AudioResampler::kNumPhaseBits;
+
 bool AudioResampler::qualityIsSupported(src_quality quality)
 {
     switch (quality) {
@@ -85,6 +89,9 @@
     case MED_QUALITY:
     case HIGH_QUALITY:
     case VERY_HIGH_QUALITY:
+    case DYN_LOW_QUALITY:
+    case DYN_MED_QUALITY:
+    case DYN_HIGH_QUALITY:
         return true;
     default:
         return false;
@@ -105,7 +112,7 @@
         if (*endptr == '\0') {
             defaultQuality = (src_quality) l;
             ALOGD("forcing AudioResampler quality to %d", defaultQuality);
-            if (defaultQuality < DEFAULT_QUALITY || defaultQuality > VERY_HIGH_QUALITY) {
+            if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) {
                 defaultQuality = DEFAULT_QUALITY;
             }
         }
@@ -125,6 +132,12 @@
         return 20;
     case VERY_HIGH_QUALITY:
         return 34;
+    case DYN_LOW_QUALITY:
+        return 4;
+    case DYN_MED_QUALITY:
+        return 6;
+    case DYN_HIGH_QUALITY:
+        return 12;
     }
 }
 
@@ -148,6 +161,16 @@
         atFinalQuality = true;
     }
 
+    /* if the caller requests DEFAULT_QUALITY and af.resampler.property
+     * has not been set, the target resampler quality is set to DYN_MED_QUALITY,
+     * and allowed to "throttle" down to DYN_LOW_QUALITY if necessary
+     * due to estimated CPU load of having too many active resamplers
+     * (the code below the if).
+     */
+    if (quality == DEFAULT_QUALITY) {
+        quality = DYN_MED_QUALITY;
+    }
+
     // naive implementation of CPU load throttling doesn't account for whether resampler is active
     pthread_mutex_lock(&mutex);
     for (;;) {
@@ -162,7 +185,6 @@
         // not enough CPU available for proposed quality level, so try next lowest level
         switch (quality) {
         default:
-        case DEFAULT_QUALITY:
         case LOW_QUALITY:
             atFinalQuality = true;
             break;
@@ -175,6 +197,15 @@
         case VERY_HIGH_QUALITY:
             quality = HIGH_QUALITY;
             break;
+        case DYN_LOW_QUALITY:
+            atFinalQuality = true;
+            break;
+        case DYN_MED_QUALITY:
+            quality = DYN_LOW_QUALITY;
+            break;
+        case DYN_HIGH_QUALITY:
+            quality = DYN_MED_QUALITY;
+            break;
         }
     }
     pthread_mutex_unlock(&mutex);
@@ -183,7 +214,6 @@
 
     switch (quality) {
     default:
-    case DEFAULT_QUALITY:
     case LOW_QUALITY:
         ALOGV("Create linear Resampler");
         resampler = new AudioResamplerOrder1(bitDepth, inChannelCount, sampleRate);
@@ -200,6 +230,21 @@
         ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", quality);
         resampler = new AudioResamplerSinc(bitDepth, inChannelCount, sampleRate, quality);
         break;
+    case DYN_LOW_QUALITY:
+    case DYN_MED_QUALITY:
+    case DYN_HIGH_QUALITY:
+        ALOGV("Create dynamic Resampler = %d", quality);
+        if (bitDepth == 32) { /* bitDepth == 32 signals float precision */
+            resampler = new AudioResamplerDyn<float, float, float>(bitDepth, inChannelCount,
+                    sampleRate, quality);
+        } else if (quality == DYN_HIGH_QUALITY) {
+            resampler = new AudioResamplerDyn<int32_t, int16_t, int32_t>(bitDepth, inChannelCount,
+                    sampleRate, quality);
+        } else {
+            resampler = new AudioResamplerDyn<int16_t, int16_t, int32_t>(bitDepth, inChannelCount,
+                    sampleRate, quality);
+        }
+        break;
     }
 
     // initialize resampler
@@ -305,7 +350,7 @@
     uint32_t phaseIncrement = mPhaseIncrement;
     size_t outputIndex = 0;
     size_t outputSampleCount = outFrameCount * 2;
-    size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
+    size_t inFrameCount = getInFrameCountRequired(outFrameCount);
 
     // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
     //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
@@ -339,8 +384,9 @@
             out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
             out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
-            if (outputIndex == outputSampleCount)
+            if (outputIndex == outputSampleCount) {
                 break;
+            }
         }
 
         // process input samples
@@ -402,7 +448,7 @@
     uint32_t phaseIncrement = mPhaseIncrement;
     size_t outputIndex = 0;
     size_t outputSampleCount = outFrameCount * 2;
-    size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
+    size_t inFrameCount = getInFrameCountRequired(outFrameCount);
 
     // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
     //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
@@ -434,8 +480,9 @@
             out[outputIndex++] += vl * sample;
             out[outputIndex++] += vr * sample;
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
-            if (outputIndex == outputSampleCount)
+            if (outputIndex == outputSampleCount) {
                 break;
+            }
         }
 
         // process input samples
@@ -514,6 +561,16 @@
             size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
             uint32_t &phaseFraction, uint32_t phaseIncrement)
 {
+    (void)maxOutPt; // remove unused parameter warnings
+    (void)maxInIdx;
+    (void)outputIndex;
+    (void)out;
+    (void)inputIndex;
+    (void)vl;
+    (void)vr;
+    (void)phaseFraction;
+    (void)phaseIncrement;
+    (void)in;
 #define MO_PARAM5   "36"        // offset of parameter 5 (outputIndex)
 
     asm(
@@ -625,6 +682,16 @@
             size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
             uint32_t &phaseFraction, uint32_t phaseIncrement)
 {
+    (void)maxOutPt; // remove unused parameter warnings
+    (void)maxInIdx;
+    (void)outputIndex;
+    (void)out;
+    (void)inputIndex;
+    (void)vl;
+    (void)vr;
+    (void)phaseFraction;
+    (void)phaseIncrement;
+    (void)in;
 #define ST_PARAM5    "40"     // offset of parameter 5 (outputIndex)
     asm(
         "stmfd  sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}\n"
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 33e64ce..b84567e 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -41,6 +41,9 @@
         MED_QUALITY=2,
         HIGH_QUALITY=3,
         VERY_HIGH_QUALITY=4,
+        DYN_LOW_QUALITY=5,
+        DYN_MED_QUALITY=6,
+        DYN_HIGH_QUALITY=7,
     };
 
     static AudioResampler* create(int bitDepth, int inChannelCount,
@@ -60,7 +63,7 @@
     // A mono provider delivers a sequence of samples.
     // A stereo provider delivers a sequence of interleaved pairs of samples.
     // Multi-channel providers are not supported.
-    // In either case, 'out' holds interleaved pairs of fixed-point signed Q19.12.
+    // In either case, 'out' holds interleaved pairs of fixed-point Q4.27.
     // That is, for a mono provider, there is an implicit up-channeling.
     // Since this method accumulates, the caller is responsible for clearing 'out' initially.
     // FIXME assumes provider is always successful; it should return the actual frame count.
@@ -81,7 +84,7 @@
     static const uint32_t kPhaseMask = (1LU<<kNumPhaseBits)-1;
 
     // multiplier to calculate fixed point phase increment
-    static const double kPhaseMultiplier = 1L << kNumPhaseBits;
+    static const double kPhaseMultiplier;
 
     AudioResampler(int bitDepth, int inChannelCount, int32_t sampleRate, src_quality quality);
 
@@ -107,6 +110,38 @@
     uint64_t mLocalTimeFreq;
     int64_t mPTS;
 
+    // returns the inFrameCount required to generate outFrameCount frames.
+    //
+    // Placed here to be a consistent for all resamplers.
+    //
+    // Right now, we use the upper bound without regards to the current state of the
+    // input buffer using integer arithmetic, as follows:
+    //
+    // (static_cast<uint64_t>(outFrameCount)*mInSampleRate + (mSampleRate - 1))/mSampleRate;
+    //
+    // The double precision equivalent (float may not be precise enough):
+    // ceil(static_cast<double>(outFrameCount) * mInSampleRate / mSampleRate);
+    //
+    // this relies on the fact that the mPhaseIncrement is rounded down from
+    // #phases * mInSampleRate/mSampleRate and the fact that Sum(Floor(x)) <= Floor(Sum(x)).
+    // http://www.proofwiki.org/wiki/Sum_of_Floors_Not_Greater_Than_Floor_of_Sums
+    //
+    // (so long as double precision is computed accurately enough to be considered
+    // greater than or equal to the Floor(x) value in int32_t arithmetic; thus this
+    // will not necessarily hold for floats).
+    //
+    // TODO:
+    // Greater accuracy and a tight bound is obtained by:
+    // 1) subtract and adjust for the current state of the AudioBufferProvider buffer.
+    // 2) using the exact integer formula where (ignoring 64b casting)
+    //  inFrameCount = (mPhaseIncrement * (outFrameCount - 1) + mPhaseFraction) / phaseWrapLimit;
+    //  phaseWrapLimit is the wraparound (1 << kNumPhaseBits), if not specified explicitly.
+    //
+    inline size_t getInFrameCountRequired(size_t outFrameCount) {
+        return (static_cast<uint64_t>(outFrameCount)*mInSampleRate
+                + (mSampleRate - 1))/mSampleRate;
+    }
+
 private:
     const src_quality mQuality;
 
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 18e59e9..8f14ff9 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -60,14 +60,15 @@
     uint32_t phaseIncrement = mPhaseIncrement;
     size_t outputIndex = 0;
     size_t outputSampleCount = outFrameCount * 2;
-    size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
+    size_t inFrameCount = getInFrameCountRequired(outFrameCount);
 
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
-        if (mBuffer.raw == NULL)
+        if (mBuffer.raw == NULL) {
             return;
+        }
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
@@ -97,8 +98,9 @@
                 mBuffer.frameCount = inFrameCount;
                 provider->getNextBuffer(&mBuffer,
                                         calculateOutputPTS(outputIndex / 2));
-                if (mBuffer.raw == NULL)
+                if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
+                }
                 in = mBuffer.i16;
                 // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
             }
@@ -126,14 +128,15 @@
     uint32_t phaseIncrement = mPhaseIncrement;
     size_t outputIndex = 0;
     size_t outputSampleCount = outFrameCount * 2;
-    size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
+    size_t inFrameCount = getInFrameCountRequired(outFrameCount);
 
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
-        if (mBuffer.raw == NULL)
+        if (mBuffer.raw == NULL) {
             return;
+        }
         // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
@@ -163,8 +166,9 @@
                 mBuffer.frameCount = inFrameCount;
                 provider->getNextBuffer(&mBuffer,
                                         calculateOutputPTS(outputIndex / 2));
-                if (mBuffer.raw == NULL)
+                if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
+                }
                 // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
                 in = mBuffer.i16;
             }
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
new file mode 100644
index 0000000..3abe8fd
--- /dev/null
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioResamplerDyn"
+//#define LOG_NDEBUG 0
+
+#include <malloc.h>
+#include <string.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <math.h>
+
+#include <cutils/compiler.h>
+#include <cutils/properties.h>
+#include <utils/Debug.h>
+#include <utils/Log.h>
+
+#include "AudioResamplerFirOps.h" // USE_NEON and USE_INLINE_ASSEMBLY defined here
+#include "AudioResamplerFirProcess.h"
+#include "AudioResamplerFirProcessNeon.h"
+#include "AudioResamplerFirGen.h" // requires math.h
+#include "AudioResamplerDyn.h"
+
+//#define DEBUG_RESAMPLER
+
+namespace android {
+
+// generate a unique resample type compile-time constant (constexpr)
+#define RESAMPLETYPE(CHANNELS, LOCKED, STRIDE) \
+    ((((CHANNELS)-1)&1) | !!(LOCKED)<<1 \
+    | ((STRIDE)==8 ? 1 : (STRIDE)==16 ? 2 : 0)<<2)
+
+/*
+ * InBuffer is a type agnostic input buffer.
+ *
+ * Layout of the state buffer for halfNumCoefs=8.
+ *
+ * [rrrrrrppppppppnnnnnnnnrrrrrrrrrrrrrrrrrrr.... rrrrrrr]
+ *  S            I                                R
+ *
+ * S = mState
+ * I = mImpulse
+ * R = mRingFull
+ * p = past samples, convoluted with the (p)ositive side of sinc()
+ * n = future samples, convoluted with the (n)egative side of sinc()
+ * r = extra space for implementing the ring buffer
+ */
+
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::InBuffer::InBuffer()
+    : mState(NULL), mImpulse(NULL), mRingFull(NULL), mStateCount(0)
+{
+}
+
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::InBuffer::~InBuffer()
+{
+    init();
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::init()
+{
+    free(mState);
+    mState = NULL;
+    mImpulse = NULL;
+    mRingFull = NULL;
+    mStateCount = 0;
+}
+
+// resizes the state buffer to accommodate the appropriate filter length
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::resize(int CHANNELS, int halfNumCoefs)
+{
+    // calculate desired state size
+    int stateCount = halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength;
+
+    // check if buffer needs resizing
+    if (mState
+            && stateCount == mStateCount
+            && mRingFull-mState == mStateCount-halfNumCoefs*CHANNELS) {
+        return;
+    }
+
+    // create new buffer
+    TI* state;
+    (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state));
+    memset(state, 0, stateCount*sizeof(*state));
+
+    // attempt to preserve state
+    if (mState) {
+        TI* srcLo = mImpulse - halfNumCoefs*CHANNELS;
+        TI* srcHi = mImpulse + halfNumCoefs*CHANNELS;
+        TI* dst = state;
+
+        if (srcLo < mState) {
+            dst += mState-srcLo;
+            srcLo = mState;
+        }
+        if (srcHi > mState + mStateCount) {
+            srcHi = mState + mStateCount;
+        }
+        memcpy(dst, srcLo, (srcHi - srcLo) * sizeof(*srcLo));
+        free(mState);
+    }
+
+    // set class member vars
+    mState = state;
+    mStateCount = stateCount;
+    mImpulse = state + halfNumCoefs*CHANNELS; // actually one sample greater than needed
+    mRingFull = state + mStateCount - halfNumCoefs*CHANNELS;
+}
+
+// copy in the input data into the head (impulse+halfNumCoefs) of the buffer.
+template<typename TC, typename TI, typename TO>
+template<int CHANNELS>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAgain(TI*& impulse, const int halfNumCoefs,
+        const TI* const in, const size_t inputIndex)
+{
+    TI* head = impulse + halfNumCoefs*CHANNELS;
+    for (size_t i=0 ; i<CHANNELS ; i++) {
+        head[i] = in[inputIndex*CHANNELS + i];
+    }
+}
+
+// advance the impulse pointer, and load in data into the head (impulse+halfNumCoefs)
+template<typename TC, typename TI, typename TO>
+template<int CHANNELS>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAdvance(TI*& impulse, const int halfNumCoefs,
+        const TI* const in, const size_t inputIndex)
+{
+    impulse += CHANNELS;
+
+    if (CC_UNLIKELY(impulse >= mRingFull)) {
+        const size_t shiftDown = mRingFull - mState - halfNumCoefs*CHANNELS;
+        memcpy(mState, mState+shiftDown, halfNumCoefs*CHANNELS*2*sizeof(TI));
+        impulse -= shiftDown;
+    }
+    readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::Constants::set(
+        int L, int halfNumCoefs, int inSampleRate, int outSampleRate)
+{
+    int bits = 0;
+    int lscale = inSampleRate/outSampleRate < 2 ? L - 1 :
+            static_cast<int>(static_cast<uint64_t>(L)*inSampleRate/outSampleRate);
+    for (int i=lscale; i; ++bits, i>>=1)
+        ;
+    mL = L;
+    mShift = kNumPhaseBits - bits;
+    mHalfNumCoefs = halfNumCoefs;
+}
+
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::AudioResamplerDyn(int bitDepth,
+        int inChannelCount, int32_t sampleRate, src_quality quality)
+    : AudioResampler(bitDepth, inChannelCount, sampleRate, quality),
+      mResampleFunc(0), mFilterSampleRate(0), mFilterQuality(DEFAULT_QUALITY),
+    mCoefBuffer(NULL)
+{
+    mVolumeSimd[0] = mVolumeSimd[1] = 0;
+    // The AudioResampler base class assumes we are always ready for 1:1 resampling.
+    // We reset mInSampleRate to 0, so setSampleRate() will calculate filters for
+    // setSampleRate() for 1:1. (May be removed if precalculated filters are used.)
+    mInSampleRate = 0;
+    mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better
+}
+
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::~AudioResamplerDyn()
+{
+    free(mCoefBuffer);
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::init()
+{
+    mFilterSampleRate = 0; // always trigger new filter generation
+    mInBuffer.init();
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::setVolume(int16_t left, int16_t right)
+{
+    AudioResampler::setVolume(left, right);
+    // volume is applied on the output type.
+    if (is_same<TO, float>::value || is_same<TO, double>::value) {
+        const TO scale = 1. / (1UL << 12);
+        mVolumeSimd[0] = static_cast<TO>(left) * scale;
+        mVolumeSimd[1] = static_cast<TO>(right) * scale;
+    } else {
+        mVolumeSimd[0] = static_cast<int32_t>(left) << 16;
+        mVolumeSimd[1] = static_cast<int32_t>(right) << 16;
+    }
+}
+
+template<typename T> T max(T a, T b) {return a > b ? a : b;}
+
+template<typename T> T absdiff(T a, T b) {return a > b ? a - b : b - a;}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c,
+        double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat)
+{
+    TC* buf;
+    static const double atten = 0.9998;   // to avoid ripple overflow
+    double fcr;
+    double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
+
+    (void)posix_memalign(reinterpret_cast<void**>(&buf), 32, (c.mL+1)*c.mHalfNumCoefs*sizeof(TC));
+    if (inSampleRate < outSampleRate) { // upsample
+        fcr = max(0.5*tbwCheat - tbw/2, tbw/2);
+    } else { // downsample
+        fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2);
+    }
+    // create and set filter
+    firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten);
+    c.mFirCoefs = buf;
+    if (mCoefBuffer) {
+        free(mCoefBuffer);
+    }
+    mCoefBuffer = buf;
+#ifdef DEBUG_RESAMPLER
+    // print basic filter stats
+    printf("L:%d  hnc:%d  stopBandAtten:%lf  fcr:%lf  atten:%lf  tbw:%lf\n",
+            c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw);
+    // test the filter and report results
+    double fp = (fcr - tbw/2)/c.mL;
+    double fs = (fcr + tbw/2)/c.mL;
+    double passMin, passMax, passRipple;
+    double stopMax, stopRipple;
+    testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000,
+            passMin, passMax, passRipple, stopMax, stopRipple);
+    printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple);
+    printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple);
+#endif
+}
+
+// recursive gcd. Using objdump, it appears the tail recursion is converted to a while loop.
+static int gcd(int n, int m)
+{
+    if (m == 0) {
+        return n;
+    }
+    return gcd(m, n % m);
+}
+
+static bool isClose(int32_t newSampleRate, int32_t prevSampleRate,
+        int32_t filterSampleRate, int32_t outSampleRate)
+{
+
+    // different upsampling ratios do not need a filter change.
+    if (filterSampleRate != 0
+            && filterSampleRate < outSampleRate
+            && newSampleRate < outSampleRate)
+        return true;
+
+    // check design criteria again if downsampling is detected.
+    int pdiff = absdiff(newSampleRate, prevSampleRate);
+    int adiff = absdiff(newSampleRate, filterSampleRate);
+
+    // allow up to 6% relative change increments.
+    // allow up to 12% absolute change increments (from filter design)
+    return pdiff < prevSampleRate>>4 && adiff < filterSampleRate>>3;
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate)
+{
+    if (mInSampleRate == inSampleRate) {
+        return;
+    }
+    int32_t oldSampleRate = mInSampleRate;
+    int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs;
+    uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift;
+    bool useS32 = false;
+
+    mInSampleRate = inSampleRate;
+
+    // TODO: Add precalculated Equiripple filters
+
+    if (mFilterQuality != getQuality() ||
+            !isClose(inSampleRate, oldSampleRate, mFilterSampleRate, mSampleRate)) {
+        mFilterSampleRate = inSampleRate;
+        mFilterQuality = getQuality();
+
+        // Begin Kaiser Filter computation
+        //
+        // The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB.
+        // Keep the stop band attenuation no greater than 84-85dB for 32 length S16 filters
+        //
+        // For s32 we keep the stop band attenuation at the same as 16b resolution, about
+        // 96-98dB
+        //
+
+        double stopBandAtten;
+        double tbwCheat = 1.; // how much we "cheat" into aliasing
+        int halfLength;
+        if (mFilterQuality == DYN_HIGH_QUALITY) {
+            // 32b coefficients, 64 length
+            useS32 = true;
+            stopBandAtten = 98.;
+            if (inSampleRate >= mSampleRate * 4) {
+                halfLength = 48;
+            } else if (inSampleRate >= mSampleRate * 2) {
+                halfLength = 40;
+            } else {
+                halfLength = 32;
+            }
+        } else if (mFilterQuality == DYN_LOW_QUALITY) {
+            // 16b coefficients, 16-32 length
+            useS32 = false;
+            stopBandAtten = 80.;
+            if (inSampleRate >= mSampleRate * 4) {
+                halfLength = 24;
+            } else if (inSampleRate >= mSampleRate * 2) {
+                halfLength = 16;
+            } else {
+                halfLength = 8;
+            }
+            if (inSampleRate <= mSampleRate) {
+                tbwCheat = 1.05;
+            } else {
+                tbwCheat = 1.03;
+            }
+        } else { // DYN_MED_QUALITY
+            // 16b coefficients, 32-64 length
+            // note: > 64 length filters with 16b coefs can have quantization noise problems
+            useS32 = false;
+            stopBandAtten = 84.;
+            if (inSampleRate >= mSampleRate * 4) {
+                halfLength = 32;
+            } else if (inSampleRate >= mSampleRate * 2) {
+                halfLength = 24;
+            } else {
+                halfLength = 16;
+            }
+            if (inSampleRate <= mSampleRate) {
+                tbwCheat = 1.03;
+            } else {
+                tbwCheat = 1.01;
+            }
+        }
+
+        // determine the number of polyphases in the filterbank.
+        // for 16b, it is desirable to have 2^(16/2) = 256 phases.
+        // https://ccrma.stanford.edu/~jos/resample/Relation_Interpolation_Error_Quantization.html
+        //
+        // We are a bit more lax on this.
+
+        int phases = mSampleRate / gcd(mSampleRate, inSampleRate);
+
+        // TODO: Once dynamic sample rate change is an option, the code below
+        // should be modified to execute only when dynamic sample rate change is enabled.
+        //
+        // as above, #phases less than 63 is too few phases for accurate linear interpolation.
+        // we increase the phases to compensate, but more phases means more memory per
+        // filter and more time to compute the filter.
+        //
+        // if we know that the filter will be used for dynamic sample rate changes,
+        // that would allow us skip this part for fixed sample rate resamplers.
+        //
+        while (phases<63) {
+            phases *= 2; // this code only needed to support dynamic rate changes
+        }
+
+        if (phases>=256) {  // too many phases, always interpolate
+            phases = 127;
+        }
+
+        // create the filter
+        mConstants.set(phases, halfLength, inSampleRate, mSampleRate);
+        createKaiserFir(mConstants, stopBandAtten,
+                inSampleRate, mSampleRate, tbwCheat);
+    } // End Kaiser filter
+
+    // update phase and state based on the new filter.
+    const Constants& c(mConstants);
+    mInBuffer.resize(mChannelCount, c.mHalfNumCoefs);
+    const uint32_t phaseWrapLimit = c.mL << c.mShift;
+    // try to preserve as much of the phase fraction as possible for on-the-fly changes
+    mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction)
+            * phaseWrapLimit / oldPhaseWrapLimit;
+    mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case.
+    mPhaseIncrement = static_cast<uint32_t>(static_cast<double>(phaseWrapLimit)
+            * inSampleRate / mSampleRate);
+
+    // determine which resampler to use
+    // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits")
+    int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0;
+    int stride = (c.mHalfNumCoefs&7)==0 ? 16 : (c.mHalfNumCoefs&3)==0 ? 8 : 2;
+    if (locked) {
+        mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase
+    }
+
+    setResampler(RESAMPLETYPE(mChannelCount, locked, stride));
+#ifdef DEBUG_RESAMPLER
+    printf("channels:%d  %s  stride:%d  %s  coef:%d  shift:%d\n",
+            mChannelCount, locked ? "locked" : "interpolated",
+            stride, useS32 ? "S32" : "S16", 2*c.mHalfNumCoefs, c.mShift);
+#endif
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount,
+            AudioBufferProvider* provider)
+{
+    (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider);
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::setResampler(unsigned resampleType)
+{
+    // stride 16 (falls back to stride 2 for machines that do not support NEON)
+    switch (resampleType) {
+    case RESAMPLETYPE(1, true, 16):
+        mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>;
+        return;
+    case RESAMPLETYPE(2, true, 16):
+        mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>;
+        return;
+    case RESAMPLETYPE(1, false, 16):
+        mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>;
+        return;
+    case RESAMPLETYPE(2, false, 16):
+        mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>;
+        return;
+    default:
+        LOG_ALWAYS_FATAL("Invalid resampler type: %u", resampleType);
+        mResampleFunc = NULL;
+        return;
+    }
+}
+
+template<typename TC, typename TI, typename TO>
+template<int CHANNELS, bool LOCKED, int STRIDE>
+void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount,
+        AudioBufferProvider* provider)
+{
+    const Constants& c(mConstants);
+    const TC* const coefs = mConstants.mFirCoefs;
+    TI* impulse = mInBuffer.getImpulse();
+    size_t inputIndex = mInputIndex;
+    uint32_t phaseFraction = mPhaseFraction;
+    const uint32_t phaseIncrement = mPhaseIncrement;
+    size_t outputIndex = 0;
+    size_t outputSampleCount = outFrameCount * 2;   // stereo output
+    size_t inFrameCount = getInFrameCountRequired(outFrameCount);
+    const uint32_t phaseWrapLimit = c.mL << c.mShift;
+
+    // NOTE: be very careful when modifying the code here. register
+    // pressure is very high and a small change might cause the compiler
+    // to generate far less efficient code.
+    // Always sanity check the result with objdump or test-resample.
+
+    // the following logic is a bit convoluted to keep the main processing loop
+    // as tight as possible with register allocation.
+    while (outputIndex < outputSampleCount) {
+        // buffer is empty, fetch a new one
+        while (mBuffer.frameCount == 0) {
+            mBuffer.frameCount = inFrameCount;
+            provider->getNextBuffer(&mBuffer,
+                    calculateOutputPTS(outputIndex / 2));
+            if (mBuffer.raw == NULL) {
+                goto resample_exit;
+            }
+            if (phaseFraction >= phaseWrapLimit) { // read in data
+                mInBuffer.template readAdvance<CHANNELS>(
+                        impulse, c.mHalfNumCoefs,
+                        reinterpret_cast<TI*>(mBuffer.raw), inputIndex);
+                phaseFraction -= phaseWrapLimit;
+                while (phaseFraction >= phaseWrapLimit) {
+                    inputIndex++;
+                    if (inputIndex >= mBuffer.frameCount) {
+                        inputIndex -= mBuffer.frameCount;
+                        provider->releaseBuffer(&mBuffer);
+                        break;
+                    }
+                    mInBuffer.template readAdvance<CHANNELS>(
+                            impulse, c.mHalfNumCoefs,
+                            reinterpret_cast<TI*>(mBuffer.raw), inputIndex);
+                    phaseFraction -= phaseWrapLimit;
+                }
+            }
+        }
+        const TI* const in = reinterpret_cast<const TI*>(mBuffer.raw);
+        const size_t frameCount = mBuffer.frameCount;
+        const int coefShift = c.mShift;
+        const int halfNumCoefs = c.mHalfNumCoefs;
+        const TO* const volumeSimd = mVolumeSimd;
+
+        // reread the last input in.
+        mInBuffer.template readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
+
+        // main processing loop
+        while (CC_LIKELY(outputIndex < outputSampleCount)) {
+            // caution: fir() is inlined and may be large.
+            // output will be loaded with the appropriate values
+            //
+            // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs]
+            // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs.
+            //
+            fir<CHANNELS, LOCKED, STRIDE>(
+                    &out[outputIndex],
+                    phaseFraction, phaseWrapLimit,
+                    coefShift, halfNumCoefs, coefs,
+                    impulse, volumeSimd);
+            outputIndex += 2;
+
+            phaseFraction += phaseIncrement;
+            while (phaseFraction >= phaseWrapLimit) {
+                inputIndex++;
+                if (inputIndex >= frameCount) {
+                    goto done;  // need a new buffer
+                }
+                mInBuffer.template readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
+                phaseFraction -= phaseWrapLimit;
+            }
+        }
+done:
+        // often arrives here when input buffer runs out
+        if (inputIndex >= frameCount) {
+            inputIndex -= frameCount;
+            provider->releaseBuffer(&mBuffer);
+            // mBuffer.frameCount MUST be zero here.
+        }
+    }
+
+resample_exit:
+    mInBuffer.setImpulse(impulse);
+    mInputIndex = inputIndex;
+    mPhaseFraction = phaseFraction;
+}
+
+/* instantiate templates used by AudioResampler::create */
+template class AudioResamplerDyn<float, float, float>;
+template class AudioResamplerDyn<int16_t, int16_t, int32_t>;
+template class AudioResamplerDyn<int32_t, int16_t, int32_t>;
+
+// ----------------------------------------------------------------------------
+}; // namespace android
diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h
new file mode 100644
index 0000000..8c56319
--- /dev/null
+++ b/services/audioflinger/AudioResamplerDyn.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_DYN_H
+#define ANDROID_AUDIO_RESAMPLER_DYN_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <cutils/log.h>
+
+#include "AudioResampler.h"
+
+namespace android {
+
+/* AudioResamplerDyn
+ *
+ * This class template is used for floating point and integer resamplers.
+ *
+ * Type variables:
+ * TC = filter coefficient type (one of int16_t, int32_t, or float)
+ * TI = input data type (one of int16_t or float)
+ * TO = output data type (one of int32_t or float)
+ *
+ * For integer input data types TI, the coefficient type TC is either int16_t or int32_t.
+ * For float input data types TI, the coefficient type TC is float.
+ */
+
+template<typename TC, typename TI, typename TO>
+class AudioResamplerDyn: public AudioResampler {
+public:
+    AudioResamplerDyn(int bitDepth, int inChannelCount,
+            int32_t sampleRate, src_quality quality);
+
+    virtual ~AudioResamplerDyn();
+
+    virtual void init();
+
+    virtual void setSampleRate(int32_t inSampleRate);
+
+    virtual void setVolume(int16_t left, int16_t right);
+
+    virtual void resample(int32_t* out, size_t outFrameCount,
+            AudioBufferProvider* provider);
+
+private:
+
+    class Constants { // stores the filter constants.
+    public:
+        Constants() :
+            mL(0), mShift(0), mHalfNumCoefs(0), mFirCoefs(NULL)
+        {}
+        void set(int L, int halfNumCoefs,
+                int inSampleRate, int outSampleRate);
+
+                 int mL;            // interpolation phases in the filter.
+                 int mShift;        // right shift to get polyphase index
+        unsigned int mHalfNumCoefs; // filter half #coefs
+           const TC* mFirCoefs;     // polyphase filter bank
+    };
+
+    class InBuffer { // buffer management for input type TI
+    public:
+        InBuffer();
+        ~InBuffer();
+        void init();
+
+        void resize(int CHANNELS, int halfNumCoefs);
+
+        // used for direct management of the mImpulse pointer
+        inline TI* getImpulse() {
+            return mImpulse;
+        }
+
+        inline void setImpulse(TI *impulse) {
+            mImpulse = impulse;
+        }
+
+        template<int CHANNELS>
+        inline void readAgain(TI*& impulse, const int halfNumCoefs,
+                const TI* const in, const size_t inputIndex);
+
+        template<int CHANNELS>
+        inline void readAdvance(TI*& impulse, const int halfNumCoefs,
+                const TI* const in, const size_t inputIndex);
+
+    private:
+        // tuning parameter guidelines: 2 <= multiple <= 8
+        static const int kStateSizeMultipleOfFilterLength = 4;
+
+        // in general, mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS.
+           TI* mState;      // base pointer for the input buffer storage
+           TI* mImpulse;    // current location of the impulse response (centered)
+           TI* mRingFull;   // mState <= mImpulse < mRingFull
+        size_t mStateCount; // size of state in units of TI.
+    };
+
+    void createKaiserFir(Constants &c, double stopBandAtten,
+            int inSampleRate, int outSampleRate, double tbwCheat);
+
+    void setResampler(unsigned resampleType);
+
+    template<int CHANNELS, bool LOCKED, int STRIDE>
+    void resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider);
+
+    // declare a pointer to member function for resample
+    typedef void (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out,
+            size_t outFrameCount, AudioBufferProvider* provider);
+
+    // data - the contiguous storage and layout of these is important.
+           InBuffer mInBuffer;
+          Constants mConstants;        // current set of coefficient parameters
+    TO __attribute__ ((aligned (8))) mVolumeSimd[2]; // must be aligned or NEON may crash
+     resample_ABP_t mResampleFunc;     // called function for resampling
+            int32_t mFilterSampleRate; // designed filter sample rate.
+        src_quality mFilterQuality;    // designed filter quality.
+              void* mCoefBuffer;       // if a filter is created, this is not null
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/
diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h
new file mode 100644
index 0000000..d024b2f
--- /dev/null
+++ b/services/audioflinger/AudioResamplerFirGen.h
@@ -0,0 +1,709 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_GEN_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_GEN_H
+
+namespace android {
+
+/*
+ * generates a sine wave at equal steps.
+ *
+ * As most of our functions use sine or cosine at equal steps,
+ * it is very efficient to compute them that way (single multiply and subtract),
+ * rather than invoking the math library sin() or cos() each time.
+ *
+ * SineGen uses Goertzel's Algorithm (as a generator not a filter)
+ * to calculate sine(wstart + n * wstep) or cosine(wstart + n * wstep)
+ * by stepping through 0, 1, ... n.
+ *
+ * e^i(wstart+wstep) = 2cos(wstep) * e^i(wstart) - e^i(wstart-wstep)
+ *
+ * or looking at just the imaginary sine term, as the cosine follows identically:
+ *
+ * sin(wstart+wstep) = 2cos(wstep) * sin(wstart) - sin(wstart-wstep)
+ *
+ * Goertzel's algorithm is more efficient than the angle addition formula,
+ * e^i(wstart+wstep) = e^i(wstart) * e^i(wstep), which takes up to
+ * 4 multiplies and 2 adds (or 3* and 3+) and requires both sine and
+ * cosine generation due to the complex * complex multiply (full rotation).
+ *
+ * See: http://en.wikipedia.org/wiki/Goertzel_algorithm
+ *
+ */
+
+class SineGen {
+public:
+    SineGen(double wstart, double wstep, bool cosine = false) {
+        if (cosine) {
+            mCurrent = cos(wstart);
+            mPrevious = cos(wstart - wstep);
+        } else {
+            mCurrent = sin(wstart);
+            mPrevious = sin(wstart - wstep);
+        }
+        mTwoCos = 2.*cos(wstep);
+    }
+    SineGen(double expNow, double expPrev, double twoCosStep) {
+        mCurrent = expNow;
+        mPrevious = expPrev;
+        mTwoCos = twoCosStep;
+    }
+    inline double value() const {
+        return mCurrent;
+    }
+    inline void advance() {
+        double tmp = mCurrent;
+        mCurrent = mCurrent*mTwoCos - mPrevious;
+        mPrevious = tmp;
+    }
+    inline double valueAdvance() {
+        double tmp = mCurrent;
+        mCurrent = mCurrent*mTwoCos - mPrevious;
+        mPrevious = tmp;
+        return tmp;
+    }
+
+private:
+    double mCurrent; // current value of sine/cosine
+    double mPrevious; // previous value of sine/cosine
+    double mTwoCos; // stepping factor
+};
+
+/*
+ * generates a series of sine generators, phase offset by fixed steps.
+ *
+ * This is used to generate polyphase sine generators, one per polyphase
+ * in the filter code below.
+ *
+ * The SineGen returned by value() starts at innerStart = outerStart + n*outerStep;
+ * increments by innerStep.
+ *
+ */
+
+class SineGenGen {
+public:
+    SineGenGen(double outerStart, double outerStep, double innerStep, bool cosine = false)
+            : mSineInnerCur(outerStart, outerStep, cosine),
+              mSineInnerPrev(outerStart-innerStep, outerStep, cosine)
+    {
+        mTwoCos = 2.*cos(innerStep);
+    }
+    inline SineGen value() {
+        return SineGen(mSineInnerCur.value(), mSineInnerPrev.value(), mTwoCos);
+    }
+    inline void advance() {
+        mSineInnerCur.advance();
+        mSineInnerPrev.advance();
+    }
+    inline SineGen valueAdvance() {
+        return SineGen(mSineInnerCur.valueAdvance(), mSineInnerPrev.valueAdvance(), mTwoCos);
+    }
+
+private:
+    SineGen mSineInnerCur; // generate the inner sine values (stepped by outerStep).
+    SineGen mSineInnerPrev; // generate the inner sine previous values
+                            // (behind by innerStep, stepped by outerStep).
+    double mTwoCos; // the inner stepping factor for the returned SineGen.
+};
+
+static inline double sqr(double x) {
+    return x * x;
+}
+
+/*
+ * rounds a double to the nearest integer for FIR coefficients.
+ *
+ * One variant uses noise shaping, which must keep error history
+ * to work (the err parameter, initialized to 0).
+ * The other variant is a non-noise shaped version for
+ * S32 coefficients (noise shaping doesn't gain much).
+ *
+ * Caution: No bounds saturation is applied, but isn't needed in this case.
+ *
+ * @param x is the value to round.
+ *
+ * @param maxval is the maximum integer scale factor expressed as an int64 (for headroom).
+ * Typically this may be the maximum positive integer+1 (using the fact that double precision
+ * FIR coefficients generated here are never that close to 1.0 to pose an overflow condition).
+ *
+ * @param err is the previous error (actual - rounded) for the previous rounding op.
+ * For 16b coefficients this can improve stopband dB performance by up to 2dB.
+ *
+ * Many variants exist for the noise shaping: http://en.wikipedia.org/wiki/Noise_shaping
+ *
+ */
+
+static inline int64_t toint(double x, int64_t maxval, double& err) {
+    double val = x * maxval;
+    double ival = floor(val + 0.5 + err*0.2);
+    err = val - ival;
+    return static_cast<int64_t>(ival);
+}
+
+static inline int64_t toint(double x, int64_t maxval) {
+    return static_cast<int64_t>(floor(x * maxval + 0.5));
+}
+
+/*
+ * Modified Bessel function of the first kind
+ * http://en.wikipedia.org/wiki/Bessel_function
+ *
+ * The formulas are taken from Abramowitz and Stegun,
+ * _Handbook of Mathematical Functions_ (links below):
+ *
+ * http://people.math.sfu.ca/~cbm/aands/page_375.htm
+ * http://people.math.sfu.ca/~cbm/aands/page_378.htm
+ *
+ * http://dlmf.nist.gov/10.25
+ * http://dlmf.nist.gov/10.40
+ *
+ * Note we assume x is nonnegative (the function is symmetric,
+ * pass in the absolute value as needed).
+ *
+ * Constants are compile time derived with templates I0Term<> and
+ * I0ATerm<> to the precision of the compiler.  The series can be expanded
+ * to any precision needed, but currently set around 24b precision.
+ *
+ * We use a bit of template math here, constexpr would probably be
+ * more appropriate for a C++11 compiler.
+ *
+ * For the intermediate range 3.75 < x < 15, we use minimax polynomial fit.
+ *
+ */
+
+template <int N>
+struct I0Term {
+    static const double value = I0Term<N-1>::value / (4. * N * N);
+};
+
+template <>
+struct I0Term<0> {
+    static const double value = 1.;
+};
+
+template <int N>
+struct I0ATerm {
+    static const double value = I0ATerm<N-1>::value * (2.*N-1.) * (2.*N-1.) / (8. * N);
+};
+
+template <>
+struct I0ATerm<0> { // 1/sqrt(2*PI);
+    static const double value = 0.398942280401432677939946059934381868475858631164934657665925;
+};
+
+#if USE_HORNERS_METHOD
+/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ...
+ * using Horner's Method: http://en.wikipedia.org/wiki/Horner's_method
+ *
+ * This has fewer multiplications than Estrin's method below, but has back to back
+ * floating point dependencies.
+ *
+ * On ARM this appears to work slower, so USE_HORNERS_METHOD is not default enabled.
+ */
+
+inline double Poly2(double A, double B, double x) {
+    return A + x * B;
+}
+
+inline double Poly4(double A, double B, double C, double D, double x) {
+    return A + x * (B + x * (C + x * (D)));
+}
+
+inline double Poly7(double A, double B, double C, double D, double E, double F, double G,
+        double x) {
+    return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G))))));
+}
+
+inline double Poly9(double A, double B, double C, double D, double E, double F, double G,
+        double H, double I, double x) {
+    return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G + x * (H + x * (I))))))));
+}
+
+#else
+/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ...
+ * using Estrin's Method: http://en.wikipedia.org/wiki/Estrin's_scheme
+ *
+ * This is typically faster, perhaps gains about 5-10% overall on ARM processors
+ * over Horner's method above.
+ */
+
+inline double Poly2(double A, double B, double x) {
+    return A + B * x;
+}
+
+inline double Poly3(double A, double B, double C, double x, double x2) {
+    return Poly2(A, B, x) + C * x2;
+}
+
+inline double Poly3(double A, double B, double C, double x) {
+    return Poly2(A, B, x) + C * x * x;
+}
+
+inline double Poly4(double A, double B, double C, double D, double x, double x2) {
+    return Poly2(A, B, x) + Poly2(C, D, x) * x2; // same as poly2(poly2, poly2, x2);
+}
+
+inline double Poly4(double A, double B, double C, double D, double x) {
+    return Poly4(A, B, C, D, x, x * x);
+}
+
+inline double Poly7(double A, double B, double C, double D, double E, double F, double G,
+        double x) {
+    double x2 = x * x;
+    return Poly4(A, B, C, D, x, x2) + Poly3(E, F, G, x, x2) * (x2 * x2);
+}
+
+inline double Poly8(double A, double B, double C, double D, double E, double F, double G,
+        double H, double x, double x2, double x4) {
+    return Poly4(A, B, C, D, x, x2) + Poly4(E, F, G, H, x, x2) * x4;
+}
+
+inline double Poly9(double A, double B, double C, double D, double E, double F, double G,
+        double H, double I, double x) {
+    double x2 = x * x;
+#if 1
+    // It does not seem faster to explicitly decompose Poly8 into Poly4, but
+    // could depend on compiler floating point scheduling.
+    double x4 = x2 * x2;
+    return Poly8(A, B, C, D, E, F, G, H, x, x2, x4) + I * (x4 * x4);
+#else
+    double val = Poly4(A, B, C, D, x, x2);
+    double x4 = x2 * x2;
+    return val + Poly4(E, F, G, H, x, x2) * x4 + I * (x4 * x4);
+#endif
+}
+#endif
+
+static inline double I0(double x) {
+    if (x < 3.75) {
+        x *= x;
+        return Poly7(I0Term<0>::value, I0Term<1>::value,
+                I0Term<2>::value, I0Term<3>::value,
+                I0Term<4>::value, I0Term<5>::value,
+                I0Term<6>::value, x); // e < 1.6e-7
+    }
+    if (1) {
+        /*
+         * Series expansion coefs are easy to calculate, but are expanded around 0,
+         * so error is unequal over the interval 0 < x < 3.75, the error being
+         * significantly better near 0.
+         *
+         * A better solution is to use precise minimax polynomial fits.
+         *
+         * We use a slightly more complicated solution for 3.75 < x < 15, based on
+         * the tables in Blair and Edwards, "Stable Rational Minimax Approximations
+         * to the Modified Bessel Functions I0(x) and I1(x)", Chalk River Nuclear Laboratories,
+         * AECL-4928.
+         *
+         * http://www.iaea.org/inis/collection/NCLCollectionStore/_Public/06/178/6178667.pdf
+         *
+         * See Table 11 for 0 < x < 15; e < 10^(-7.13).
+         *
+         * Note: Beta cannot exceed 15 (hence Stopband cannot exceed 144dB = 24b).
+         *
+         * This speeds up overall computation by about 40% over using the else clause below,
+         * which requires sqrt and exp.
+         *
+         */
+
+        x *= x;
+        double num = Poly9(-0.13544938430e9, -0.33153754512e8,
+                -0.19406631946e7, -0.48058318783e5,
+                -0.63269783360e3, -0.49520779070e1,
+                -0.24970910370e-1, -0.74741159550e-4,
+                -0.18257612460e-6, x);
+        double y = x - 225.; // reflection around 15 (squared)
+        double den = Poly4(-0.34598737196e8, 0.23852643181e6,
+                -0.70699387620e3, 0.10000000000e1, y);
+        return num / den;
+
+#if IO_EXTENDED_BETA
+        /* Table 42 for x > 15; e < 10^(-8.11).
+         * This is used for Beta>15, but is disabled here as
+         * we never use Beta that high.
+         *
+         * NOTE: This should be enabled only for x > 15.
+         */
+
+        double y = 1./x;
+        double z = y - (1./15);
+        double num = Poly2(0.415079861746e1, -0.5149092496e1, z);
+        double den = Poly3(0.103150763823e2, -0.14181687413e2,
+                0.1000000000e1, z);
+        return exp(x) * sqrt(y) * num / den;
+#endif
+    } else {
+        /*
+         * NOT USED, but reference for large Beta.
+         *
+         * Abramowitz and Stegun asymptotic formula.
+         * works for x > 3.75.
+         */
+        double y = 1./x;
+        return exp(x) * sqrt(y) *
+                // note: reciprocal square root may be easier!
+                // http://en.wikipedia.org/wiki/Fast_inverse_square_root
+                Poly9(I0ATerm<0>::value, I0ATerm<1>::value,
+                        I0ATerm<2>::value, I0ATerm<3>::value,
+                        I0ATerm<4>::value, I0ATerm<5>::value,
+                        I0ATerm<6>::value, I0ATerm<7>::value,
+                        I0ATerm<8>::value, y); // (... e) < 1.9e-7
+    }
+}
+
+/* A speed optimized version of the Modified Bessel I0() which incorporates
+ * the sqrt and numerator multiply and denominator divide into the computation.
+ * This speeds up filter computation by about 10-15%.
+ */
+static inline double I0SqrRat(double x2, double num, double den) {
+    if (x2 < (3.75 * 3.75)) {
+        return Poly7(I0Term<0>::value, I0Term<1>::value,
+                I0Term<2>::value, I0Term<3>::value,
+                I0Term<4>::value, I0Term<5>::value,
+                I0Term<6>::value, x2) * num / den; // e < 1.6e-7
+    }
+    num *= Poly9(-0.13544938430e9, -0.33153754512e8,
+            -0.19406631946e7, -0.48058318783e5,
+            -0.63269783360e3, -0.49520779070e1,
+            -0.24970910370e-1, -0.74741159550e-4,
+            -0.18257612460e-6, x2); // e < 10^(-7.13).
+    double y = x2 - 225.; // reflection around 15 (squared)
+    den *= Poly4(-0.34598737196e8, 0.23852643181e6,
+            -0.70699387620e3, 0.10000000000e1, y);
+    return num / den;
+}
+
+/*
+ * calculates the transition bandwidth for a Kaiser filter
+ *
+ * Formula 3.2.8, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48
+ * Formula 7.76, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542
+ *
+ * @param halfNumCoef is half the number of coefficients per filter phase.
+ *
+ * @param stopBandAtten is the stop band attenuation desired.
+ *
+ * @return the transition bandwidth in normalized frequency (0 <= f <= 0.5)
+ */
+static inline double firKaiserTbw(int halfNumCoef, double stopBandAtten) {
+    return (stopBandAtten - 7.95)/((2.*14.36)*halfNumCoef);
+}
+
+/*
+ * calculates the fir transfer response of the overall polyphase filter at w.
+ *
+ * Calculates the DTFT transfer coefficient H(w) for 0 <= w <= PI, utilizing the
+ * fact that h[n] is symmetric (cosines only, no complex arithmetic).
+ *
+ * We use Goertzel's algorithm to accelerate the computation to essentially
+ * a single multiply and 2 adds per filter coefficient h[].
+ *
+ * Be careful to consider that h[n] is the overall polyphase filter,
+ * with L phases, so rescaling H(w)/L is probably what you expect for "unity gain",
+ * as you only use one of the polyphases at a time.
+ */
+template <typename T>
+static inline double firTransfer(const T* coef, int L, int halfNumCoef, double w) {
+    double accum = static_cast<double>(coef[0])*0.5;  // "center coefficient" from first bank
+    coef += halfNumCoef;    // skip first filterbank (picked up by the last filterbank).
+#if SLOW_FIRTRANSFER
+    /* Original code for reference.  This is equivalent to the code below, but slower. */
+    for (int i=1 ; i<=L ; ++i) {
+        for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) {
+            accum += cos(ix*w)*static_cast<double>(*coef++);
+        }
+    }
+#else
+    /*
+     * Our overall filter is stored striped by polyphases, not a contiguous h[n].
+     * We could fetch coefficients in a non-contiguous fashion
+     * but that will not scale to vector processing.
+     *
+     * We apply Goertzel's algorithm directly to each polyphase filter bank instead of
+     * using cosine generation/multiplication, thereby saving one multiply per inner loop.
+     *
+     * See: http://en.wikipedia.org/wiki/Goertzel_algorithm
+     * Also: Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 720.
+     *
+     * We use the basic recursion to incorporate the cosine steps into real sequence x[n]:
+     * s[n] = x[n] + (2cosw)*s[n-1] - s[n-2]
+     *
+     * y[n] = s[n] - e^(iw)s[n-1]
+     *      = sum_{k=-\infty}^{n} x[k]e^(-iw(n-k))
+     *      = e^(-iwn) sum_{k=0}^{n} x[k]e^(iwk)
+     *
+     * The summation contains the frequency steps we want multiplied by the source
+     * (similar to a DTFT).
+     *
+     * Using symmetry, and just the real part (be careful, this must happen
+     * after any internal complex multiplications), the polyphase filterbank
+     * transfer function is:
+     *
+     * Hpp[n, w, w_0] = sum_{k=0}^{n} x[k] * cos(wk + w_0)
+     *                = Re{ e^(iwn + iw_0) y[n]}
+     *                = cos(wn+w_0) * s[n] - cos(w(n+1)+w_0) * s[n-1]
+     *
+     * using the fact that s[n] of real x[n] is real.
+     *
+     */
+    double dcos = 2. * cos(L*w);
+    int start = ((halfNumCoef)*L + 1);
+    SineGen cc((start - L) * w, w, true); // cosine
+    SineGen cp(start * w, w, true); // cosine
+    for (int i=1 ; i<=L ; ++i) {
+        double sc = 0;
+        double sp = 0;
+        for (int j=0 ; j<halfNumCoef ; ++j) {
+            double tmp = sc;
+            sc  = static_cast<double>(*coef++) + dcos*sc - sp;
+            sp = tmp;
+        }
+        // If we are awfully clever, we can apply Goertzel's algorithm
+        // again on the sc and sp sequences returned here.
+        accum += cc.valueAdvance() * sc - cp.valueAdvance() * sp;
+    }
+#endif
+    return accum*2.;
+}
+
+/*
+ * evaluates the minimum and maximum |H(f)| bound in a band region.
+ *
+ * This is usually done with equally spaced increments in the target band in question.
+ * The passband is often very small, and sampled that way. The stopband is often much
+ * larger.
+ *
+ * We use the fact that the overall polyphase filter has an additional bank at the end
+ * for interpolation; hence it is overspecified for the H(f) computation.  Thus the
+ * first polyphase is never actually checked, excepting its first term.
+ *
+ * In this code we use the firTransfer() evaluator above, which uses Goertzel's
+ * algorithm to calculate the transfer function at each point.
+ *
+ * TODO: An alternative with equal spacing is the FFT/DFT.  An alternative with unequal
+ * spacing is a chirp transform.
+ *
+ * @param coef is the designed polyphase filter banks
+ *
+ * @param L is the number of phases (for interpolation)
+ *
+ * @param halfNumCoef should be half the number of coefficients for a single
+ * polyphase.
+ *
+ * @param fstart is the normalized frequency start.
+ *
+ * @param fend is the normalized frequency end.
+ *
+ * @param steps is the number of steps to take (sampling) between frequency start and end
+ *
+ * @param firMin returns the minimum transfer |H(f)| found
+ *
+ * @param firMax returns the maximum transfer |H(f)| found
+ *
+ * 0 <= f <= 0.5.
+ * This is used to test passband and stopband performance.
+ */
+template <typename T>
+static void testFir(const T* coef, int L, int halfNumCoef,
+        double fstart, double fend, int steps, double &firMin, double &firMax) {
+    double wstart = fstart*(2.*M_PI);
+    double wend = fend*(2.*M_PI);
+    double wstep = (wend - wstart)/steps;
+    double fmax, fmin;
+    double trf = firTransfer(coef, L, halfNumCoef, wstart);
+    if (trf<0) {
+        trf = -trf;
+    }
+    fmin = fmax = trf;
+    wstart += wstep;
+    for (int i=1; i<steps; ++i) {
+        trf = firTransfer(coef, L, halfNumCoef, wstart);
+        if (trf<0) {
+            trf = -trf;
+        }
+        if (trf>fmax) {
+            fmax = trf;
+        }
+        else if (trf<fmin) {
+            fmin = trf;
+        }
+        wstart += wstep;
+    }
+    // renormalize - this is only needed for integer filter types
+    double norm = 1./((1ULL<<(sizeof(T)*8-1))*L);
+
+    firMin = fmin * norm;
+    firMax = fmax * norm;
+}
+
+/*
+ * evaluates the |H(f)| lowpass band characteristics.
+ *
+ * This function tests the lowpass characteristics for the overall polyphase filter,
+ * and is used to verify the design.  For this case, fp should be set to the
+ * passband normalized frequency from 0 to 0.5 for the overall filter (thus it
+ * is the designed polyphase bank value / L).  Likewise for fs.
+ *
+ * @param coef is the designed polyphase filter banks
+ *
+ * @param L is the number of phases (for interpolation)
+ *
+ * @param halfNumCoef should be half the number of coefficients for a single
+ * polyphase.
+ *
+ * @param fp is the passband normalized frequency, 0 < fp < fs < 0.5.
+ *
+ * @param fs is the stopband normalized frequency, 0 < fp < fs < 0.5.
+ *
+ * @param passSteps is the number of passband sampling steps.
+ *
+ * @param stopSteps is the number of stopband sampling steps.
+ *
+ * @param passMin is the minimum value in the passband
+ *
+ * @param passMax is the maximum value in the passband (useful for scaling).  This should
+ * be less than 1., to avoid sine wave test overflow.
+ *
+ * @param passRipple is the passband ripple.  Typically this should be less than 0.1 for
+ * an audio filter.  Generally speaker/headphone device characteristics will dominate
+ * the passband term.
+ *
+ * @param stopMax is the maximum value in the stopband.
+ *
+ * @param stopRipple is the stopband ripple, also known as stopband attenuation.
+ * Typically this should be greater than ~80dB for low quality, and greater than
+ * ~100dB for full 16b quality, otherwise aliasing may become noticeable.
+ *
+ */
+template <typename T>
+static void testFir(const T* coef, int L, int halfNumCoef,
+        double fp, double fs, int passSteps, int stopSteps,
+        double &passMin, double &passMax, double &passRipple,
+        double &stopMax, double &stopRipple) {
+    double fmin, fmax;
+    testFir(coef, L, halfNumCoef, 0., fp, passSteps, fmin, fmax);
+    double d1 = (fmax - fmin)/2.;
+    passMin = fmin;
+    passMax = fmax;
+    passRipple = -20.*log10(1. - d1); // passband ripple
+    testFir(coef, L, halfNumCoef, fs, 0.5, stopSteps, fmin, fmax);
+    // fmin is really not important for the stopband.
+    stopMax = fmax;
+    stopRipple = -20.*log10(fmax); // stopband ripple/attenuation
+}
+
+/*
+ * Calculates the overall polyphase filter based on a windowed sinc function.
+ *
+ * The windowed sinc is an odd length symmetric filter of exactly L*halfNumCoef*2+1
+ * taps for the entire kernel.  This is then decomposed into L+1 polyphase filterbanks.
+ * The last filterbank is used for interpolation purposes (and is mostly composed
+ * of the first bank shifted by one sample), and is unnecessary if one does
+ * not do interpolation.
+ *
+ * We use the last filterbank for some transfer function calculation purposes,
+ * so it needs to be generated anyways.
+ *
+ * @param coef is the caller allocated space for coefficients.  This should be
+ * exactly (L+1)*halfNumCoef in size.
+ *
+ * @param L is the number of phases (for interpolation)
+ *
+ * @param halfNumCoef should be half the number of coefficients for a single
+ * polyphase.
+ *
+ * @param stopBandAtten is the stopband value, should be >50dB.
+ *
+ * @param fcr is cutoff frequency/sampling rate (<0.5).  At this point, the energy
+ * should be 6dB less. (fcr is where the amplitude drops by half).  Use the
+ * firKaiserTbw() to calculate the transition bandwidth.  fcr is the midpoint
+ * between the stop band and the pass band (fstop+fpass)/2.
+ *
+ * @param atten is the attenuation (generally slightly less than 1).
+ */
+
+template <typename T>
+static inline void firKaiserGen(T* coef, int L, int halfNumCoef,
+        double stopBandAtten, double fcr, double atten) {
+    //
+    // Formula 3.2.5, 3.2.7, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48
+    // Formula 7.75, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542
+    //
+    // See also: http://melodi.ee.washington.edu/courses/ee518/notes/lec17.pdf
+    //
+    // Kaiser window and beta parameter
+    //
+    //         | 0.1102*(A - 8.7)                         A > 50
+    //  beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21)   21 <= A <= 50
+    //         | 0.                                       A < 21
+    //
+    // with A is the desired stop-band attenuation in dBFS
+    //
+    //    30 dB    2.210
+    //    40 dB    3.384
+    //    50 dB    4.538
+    //    60 dB    5.658
+    //    70 dB    6.764
+    //    80 dB    7.865
+    //    90 dB    8.960
+    //   100 dB   10.056
+
+    const int N = L * halfNumCoef; // non-negative half
+    const double beta = 0.1102 * (stopBandAtten - 8.7); // >= 50dB always
+    const double xstep = (2. * M_PI) * fcr / L;
+    const double xfrac = 1. / N;
+    const double yscale = atten * L / (I0(beta) * M_PI);
+    const double sqrbeta = sqr(beta);
+
+    // We use sine generators, which computes sines on regular step intervals.
+    // This speeds up overall computation about 40% from computing the sine directly.
+
+    SineGenGen sgg(0., xstep, L*xstep); // generates sine generators (one per polyphase)
+
+    for (int i=0 ; i<=L ; ++i) { // generate an extra set of coefs for interpolation
+
+        // computation for a single polyphase of the overall filter.
+        SineGen sg = sgg.valueAdvance(); // current sine generator for "j" inner loop.
+        double err = 0; // for noise shaping on int16_t coefficients (over each polyphase)
+
+        for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) {
+            double y;
+            if (CC_LIKELY(ix)) {
+                double x = static_cast<double>(ix);
+
+                // sine generator: sg.valueAdvance() returns sin(ix*xstep);
+                // y = I0(beta * sqrt(1.0 - sqr(x * xfrac))) * yscale * sg.valueAdvance() / x;
+                y = I0SqrRat(sqrbeta * (1.0 - sqr(x * xfrac)), yscale * sg.valueAdvance(), x);
+            } else {
+                y = 2. * atten * fcr; // center of filter, sinc(0) = 1.
+                sg.advance();
+            }
+
+            if (is_same<T, int16_t>::value) { // int16_t needs noise shaping
+                *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1), err));
+            } else if (is_same<T, int32_t>::value) {
+                *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1)));
+            } else { // assumed float or double
+                *coef++ = static_cast<T>(y);
+            }
+        }
+    }
+}
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_GEN_H*/
diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h
new file mode 100644
index 0000000..bf2163f
--- /dev/null
+++ b/services/audioflinger/AudioResamplerFirOps.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_OPS_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_OPS_H
+
+namespace android {
+
+#if defined(__arm__) && !defined(__thumb__)
+#define USE_INLINE_ASSEMBLY (true)
+#else
+#define USE_INLINE_ASSEMBLY (false)
+#endif
+
+#if USE_INLINE_ASSEMBLY && defined(__ARM_NEON__)
+#define USE_NEON (true)
+#include <arm_neon.h>
+#else
+#define USE_NEON (false)
+#endif
+
+template<typename T, typename U>
+struct is_same
+{
+    static const bool value = false;
+};
+
+template<typename T>
+struct is_same<T, T>  // partial specialization
+{
+    static const bool value = true;
+};
+
+static inline
+int32_t mulRL(int left, int32_t in, uint32_t vRL)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    if (left) {
+        asm( "smultb %[out], %[in], %[vRL] \n"
+             : [out]"=r"(out)
+             : [in]"%r"(in), [vRL]"r"(vRL)
+             : );
+    } else {
+        asm( "smultt %[out], %[in], %[vRL] \n"
+             : [out]"=r"(out)
+             : [in]"%r"(in), [vRL]"r"(vRL)
+             : );
+    }
+    return out;
+#else
+    int16_t v = left ? static_cast<int16_t>(vRL) : static_cast<int16_t>(vRL>>16);
+    return static_cast<int32_t>((static_cast<int64_t>(in) * v) >> 16);
+#endif
+}
+
+static inline
+int32_t mulAdd(int16_t in, int16_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    asm( "smlabb %[out], %[v], %[in], %[a] \n"
+         : [out]"=r"(out)
+         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
+         : );
+    return out;
+#else
+    return a + v * in;
+#endif
+}
+
+static inline
+int32_t mulAdd(int16_t in, int32_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    asm( "smlawb %[out], %[v], %[in], %[a] \n"
+         : [out]"=r"(out)
+         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
+         : );
+    return out;
+#else
+    return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 16);
+#endif
+}
+
+static inline
+int32_t mulAdd(int32_t in, int32_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    asm( "smmla %[out], %[v], %[in], %[a] \n"
+         : [out]"=r"(out)
+         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
+         : );
+    return out;
+#else
+    return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 32);
+#endif
+}
+
+static inline
+int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    if (left) {
+        asm( "smlabb %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    } else {
+        asm( "smlabt %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    }
+    return out;
+#else
+    int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16);
+    return a + v * s;
+#endif
+}
+
+static inline
+int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    if (left) {
+        asm( "smlawb %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    } else {
+        asm( "smlawt %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    }
+    return out;
+#else
+    int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16);
+    return a + static_cast<int32_t>((static_cast<int64_t>(v) * s) >> 16);
+#endif
+}
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
new file mode 100644
index 0000000..76d2d66
--- /dev/null
+++ b/services/audioflinger/AudioResamplerFirProcess.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
+
+namespace android {
+
+// depends on AudioResamplerFirOps.h
+
/* variant for input type TI = int16_t input samples */
template<typename TC>
static inline
void mac(int32_t& l, int32_t& r, TC coef, const int16_t* samples)
{
    // Accumulate one interleaved stereo frame into l/r by loading the
    // pair as a single 32-bit word and dispatching to mulAddRL
    // (left = low half-word on little-endian).
    // NOTE(review): the reinterpret_cast assumes 'samples' is 32-bit
    // aligned and that aliasing int16_t[2] as uint32_t is acceptable on
    // the supported toolchains — confirm with callers.
    uint32_t rl = *reinterpret_cast<const uint32_t*>(samples);
    l = mulAddRL(1, rl, coef, l);
    r = mulAddRL(0, rl, coef, r);
}
+
template<typename TC>
static inline
void mac(int32_t& l, TC coef, const int16_t* samples)
{
    // Mono multiply-accumulate: l += coef * samples[0], computed by the
    // fixed-point mulAdd() from AudioResamplerFirOps.h.
    l = mulAdd(samples[0], coef, l);
}
+
/* variant for input type TI = float input samples */
template<typename TC>
static inline
void mac(float& l, float& r, TC coef, const float* samples)
{
    // Accumulate one stereo frame: samples[0] is the left sample,
    // samples[1] the right; both are scaled by the same coefficient.
    l += coef * samples[0];
    r += coef * samples[1];
}
+
template<typename TC>
static inline
void mac(float& l, TC coef, const float* samples)
{
    // Mono float multiply-accumulate: l += coef * samples[0].
    l += coef * samples[0];
}
+
/* variant for output type TO = int32_t output samples */
static inline
int32_t volumeAdjust(int32_t value, int32_t volume)
{
    // Applies a fixed-point volume to the accumulated sample via mulRL
    // (declared earlier in AudioResamplerFirOps.h) and doubles the result
    // to restore the bit lost by the fractional multiply.
    // NOTE(review): mulRL is not visible in this chunk — confirm its
    // Q-format there; per the original note only the top 16 bits are used.
    return 2 * mulRL(0, value, volume);  // Note: only use top 16b
}
+
/* variant for output type TO = float output samples */
static inline
float volumeAdjust(float value, float volume)
{
    // Floating point volume is a plain multiplicative gain.
    const float scaled = value * volume;
    return scaled;
}
+
+/*
+ * Calculates a single output frame (two samples).
+ *
+ * This function computes both the positive half FIR dot product and
+ * the negative half FIR dot product, accumulates, and then applies the volume.
+ *
+ * This is a locked phase filter (it does not compute the interpolation).
+ *
+ * Use fir() to compute the proper coefficient pointers for a polyphase
+ * filter bank.
+ */
+
/*
 * Generic (non-NEON) locked-phase kernel.
 *
 * Accumulates the positive-half dot product (sP walks backwards) and the
 * negative-half dot product (sN walks forwards), then applies the
 * per-channel volume and adds into out[0..1].  'count' is the number of
 * taps per half and must be > 0 (do/while loop).  For mono the single
 * accumulator is written to both output samples.
 */
template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO>
static inline
void ProcessL(TO* const out,
        int count,
        const TC* coefsP,
        const TC* coefsN,
        const TI* sP,
        const TI* sN,
        const TO* const volumeLR)
{
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS >= 1 && CHANNELS <= 2)
    if (CHANNELS == 2) {
        TO l = 0;
        TO r = 0;
        do {
            mac(l, r, *coefsP++, sP);
            sP -= CHANNELS;
            mac(l, r, *coefsN++, sN);
            sN += CHANNELS;
        } while (--count > 0);
        out[0] += volumeAdjust(l, volumeLR[0]);
        out[1] += volumeAdjust(r, volumeLR[1]);
    } else { /* CHANNELS == 1 */
        TO l = 0;
        do {
            mac(l, *coefsP++, sP);
            sP -= CHANNELS;
            mac(l, *coefsN++, sN);
            sN += CHANNELS;
        } while (--count > 0);
        out[0] += volumeAdjust(l, volumeLR[0]);
        // mono: same accumulator feeds both channels (volumes may differ)
        out[1] += volumeAdjust(l, volumeLR[1]);
    }
}
+
+/*
+ * Calculates a single output frame (two samples) interpolating phase.
+ *
+ * This function computes both the positive half FIR dot product and
+ * the negative half FIR dot product, accumulates, and then applies the volume.
+ *
+ * This is an interpolated phase filter.
+ *
+ * Use fir() to compute the proper coefficient pointers for a polyphase
+ * filter bank.
+ */
+
+template<typename TC, typename T>
+void adjustLerp(T& lerpP __unused)
+{
+}
+
+template<int32_t, typename T>
+void adjustLerp(T& lerpP)
+{
+    lerpP >>= 16;   // lerpP is 32bit for NEON int32_t, but always 16 bit for non-NEON path
+}
+
+template<typename TC, typename TINTERP>
+static inline
+TC interpolate(TC coef_0, TC coef_1, TINTERP lerp)
+{
+    return lerp * (coef_1 - coef_0) + coef_0;
+}
+
+template<int16_t, uint32_t>
+static inline
+int16_t interpolate(int16_t coef_0, int16_t coef_1, uint32_t lerp)
+{
+    return (static_cast<int16_t>(lerp) * ((coef_1-coef_0)<<1)>>16) + coef_0;
+}
+
+template<int32_t, uint32_t>
+static inline
+int32_t interpolate(int32_t coef_0, int32_t coef_1, uint32_t lerp)
+{
+    return mulAdd(static_cast<int16_t>(lerp), (coef_1-coef_0)<<1, coef_0);
+}
+
+template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP>
+static inline
+void Process(TO* const out,
+        int count,
+        const TC* coefsP,
+        const TC* coefsN,
+        const TC* coefsP1 __unused,
+        const TC* coefsN1 __unused,
+        const TI* sP,
+        const TI* sN,
+        TINTERP lerpP,
+        const TO* const volumeLR)
+{
+    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS >= 1 && CHANNELS <= 2)
+    adjustLerp<TC, TINTERP>(lerpP); // coefficient type adjustment for interpolation
+
+    if (CHANNELS == 2) {
+        TO l = 0;
+        TO r = 0;
+        for (size_t i = 0; i < count; ++i) {
+            mac(l, r, interpolate(coefsP[0], coefsP[count], lerpP), sP);
+            coefsP++;
+            sP -= CHANNELS;
+            mac(l, r, interpolate(coefsN[count], coefsN[0], lerpP), sN);
+            coefsN++;
+            sN += CHANNELS;
+        }
+        out[0] += volumeAdjust(l, volumeLR[0]);
+        out[1] += volumeAdjust(r, volumeLR[1]);
+    } else { /* CHANNELS == 1 */
+        TO l = 0;
+        for (size_t i = 0; i < count; ++i) {
+            mac(l, interpolate(coefsP[0], coefsP[count], lerpP), sP);
+            coefsP++;
+            sP -= CHANNELS;
+            mac(l, interpolate(coefsN[count], coefsN[0], lerpP), sN);
+            coefsN++;
+            sN += CHANNELS;
+        }
+        out[0] += volumeAdjust(l, volumeLR[0]);
+        out[1] += volumeAdjust(l, volumeLR[1]);
+    }
+}
+
+/*
+ * Calculates a single output frame (two samples) from input sample pointer.
+ *
+ * This sets up the params for the accelerated Process() and ProcessL()
+ * functions to do the appropriate dot products.
+ *
+ * @param out should point to the output buffer with space for at least one output frame.
+ *
+ * @param phase is the fractional distance between input frames for interpolation:
+ * phase >= 0  && phase < phaseWrapLimit.  It can be thought of as a rational fraction
+ * of phase/phaseWrapLimit.
+ *
+ * @param phaseWrapLimit is #polyphases<<coefShift, where #polyphases is the number of polyphases
+ * in the polyphase filter. Likewise, #polyphases can be obtained as (phaseWrapLimit>>coefShift).
+ *
+ * @param coefShift gives the bit alignment of the polyphase index in the phase parameter.
+ *
+ * @param halfNumCoefs is the half the number of coefficients per polyphase filter. Since the
+ * overall filterbank is odd-length symmetric, only halfNumCoefs need be stored.
+ *
+ * @param coefs is the polyphase filter bank, starting at from polyphase index 0, and ranging to
+ * and including the #polyphases.  Each polyphase of the filter has half-length halfNumCoefs
+ * (due to symmetry).  The total size of the filter bank in coefficients is
+ * (#polyphases+1)*halfNumCoefs.
+ *
+ * The filter bank coefs should be aligned to a minimum of 16 bytes (preferably to cache line).
+ *
+ * The coefs should be attenuated (to compensate for passband ripple)
+ * if storing back into the native format.
+ *
+ * @param samples are unaligned input samples.  The position is in the "middle" of the
+ * sample array with respect to the FIR filter:
+ * the negative half of the filter is dot product from samples+1 to samples+halfNumCoefs;
+ * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1.
+ *
+ * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
+ * expressed as a S32 integer.  A negative value inverts the channel 180 degrees.
+ * The pointer volumeLR should be aligned to a minimum of 8 bytes.
+ * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
+ *
+ * In between calls to filterCoefficient, the phase is incremented by phaseIncrement, where
+ * phaseIncrement is calculated as inputSampling * phaseWrapLimit / outputSampling.
+ *
+ * The filter polyphase index is given by indexP = phase >> coefShift. Due to
+ * odd length symmetric filter, the polyphase index of the negative half depends on
+ * whether interpolation is used.
+ *
+ * The fractional siting between the polyphase indices is given by the bits below coefShift:
+ *
+ * lerpP = phase << 32 - coefShift >> 1;  // for 32 bit unsigned phase multiply
+ * lerpP = phase << 32 - coefShift >> 17; // for 16 bit unsigned phase multiply
+ *
+ * For integer types, this is expressed as:
+ *
+ * lerpP = phase << sizeof(phase)*8 - coefShift
+ *              >> (sizeof(phase)-sizeof(*coefs))*8 + 1;
+ *
+ * For floating point, lerpP is the fractional phase scaled to [0.0, 1.0):
+ *
+ * lerpP = (phase << 32 - coefShift) / (1 << 32); // floating point equivalent
+ */
+
template<int CHANNELS, bool LOCKED, int STRIDE, typename TC, typename TI, typename TO>
static inline
void fir(TO* const out,
        const uint32_t phase, const uint32_t phaseWrapLimit,
        const int coefShift, const int halfNumCoefs, const TC* const coefs,
        const TI* const samples, const TO* const volumeLR)
{
    // Parameter semantics are described in the block comment above.
    // NOTE: be very careful when modifying the code here. register
    // pressure is very high and a small change might cause the compiler
    // to generate far less efficient code.
    // Always sanity check the result with objdump or test-resample.

    if (LOCKED) {
        // locked polyphase (no interpolation)
        // Compute the polyphase filter index on the positive and negative side.
        // indexN mirrors the phase (phaseWrapLimit - phase): the filter bank
        // is odd-length symmetric, so the negative half reuses the table.
        uint32_t indexP = phase >> coefShift;
        uint32_t indexN = (phaseWrapLimit - phase) >> coefShift;
        const TC* coefsP = coefs + indexP*halfNumCoefs;
        const TC* coefsN = coefs + indexN*halfNumCoefs;
        const TI* sP = samples;
        const TI* sN = samples + CHANNELS;

        // dot product filter.
        ProcessL<CHANNELS, STRIDE>(out,
                halfNumCoefs, coefsP, coefsN, sP, sN, volumeLR);
    } else {
        // interpolated polyphase
        // Compute the polyphase filter index on the positive and negative side.
        uint32_t indexP = phase >> coefShift;
        uint32_t indexN = (phaseWrapLimit - phase - 1) >> coefShift; // one's complement.
        const TC* coefsP = coefs + indexP*halfNumCoefs;
        const TC* coefsN = coefs + indexN*halfNumCoefs;
        const TC* coefsP1 = coefsP + halfNumCoefs;   // next polyphase (positive side)
        const TC* coefsN1 = coefsN + halfNumCoefs;   // next polyphase (negative side)
        const TI* sP = samples;
        const TI* sN = samples + CHANNELS;

        // Interpolation fraction lerpP derived by shifting all the way up and down
        // to clear the appropriate bits and align to the appropriate level
        // for the integer multiply.  The constants should resolve in compile time.
        //
        // The interpolated filter coefficient is derived as follows for the pos/neg half:
        //
        // interpolated[P] = index[P]*lerpP + index[P+1]*(1-lerpP)
        // interpolated[N] = index[N+1]*lerpP + index[N]*(1-lerpP)

        // on-the-fly interpolated dot product filter
        // (is_same<> here is presumably a local type-traits helper rather
        //  than std::is_same — confirm the include; the branch resolves at
        //  compile time for each TC.)
        if (is_same<TC, float>::value || is_same<TC, double>::value) {
            // scale = 2^-32: maps the 32-bit fractional phase to [0.0, 1.0)
            static const TC scale = 1. / (65536. * 65536.); // scale phase bits to [0.0, 1.0)
            TC lerpP = TC(phase << (sizeof(phase)*8 - coefShift)) * scale;

            Process<CHANNELS, STRIDE>(out,
                    halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR);
        } else {
            uint32_t lerpP = phase << (sizeof(phase)*8 - coefShift)
                    >> ((sizeof(phase)-sizeof(*coefs))*8 + 1);

            Process<CHANNELS, STRIDE>(out,
                    halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR);
        }
    }
}
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h
new file mode 100644
index 0000000..f311cef
--- /dev/null
+++ b/services/audioflinger/AudioResamplerFirProcessNeon.h
@@ -0,0 +1,1149 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H
+
+namespace android {
+
+// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h
+
+#if USE_NEON
+//
+// NEON specializations are enabled for Process() and ProcessL()
+//
+// TODO: Stride 16 and Stride 8 can be combined with one pass stride 8 (if necessary)
+// and looping stride 16 (or vice versa). This has some polyphase coef data alignment
+// issues with S16 coefs. Consider this later.
+
+// Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out.
// Mono epilogue: horizontally reduce the four partial sums in q0,
// replicate to L/R, apply the per-channel volumes (vqrdmulh) and
// saturating-accumulate into the existing output frame.  Requires the
// volume pair to be 8-byte aligned (":64").
#define ASSEMBLY_ACCUMULATE_MONO \
        "vld1.s32       {d2}, [%[vLR]:64]        \n"/* (1) load volumes */\
        "vld1.s32       {d3}, %[out]             \n"/* (2) unaligned load the output */\
        "vpadd.s32      d0, d0, d1               \n"/* (1) add all 4 partial sums */\
        "vpadd.s32      d0, d0, d0               \n"/* (1+4d) and replicate L/R */\
        "vqrdmulh.s32   d0, d0, d2               \n"/* (2+3d) apply volume */\
        "vqadd.s32      d3, d3, d0               \n"/* (1+4d) accumulate result (saturating) */\
        "vst1.s32       {d3}, %[out]             \n"/* (2+2d) store result */

// Stereo epilogue: q0 holds the left partial sums and q4 the right; they
// are reduced into a single d-register before the volume/accumulate steps.
#define ASSEMBLY_ACCUMULATE_STEREO \
        "vld1.s32       {d2}, [%[vLR]:64]        \n"/* (1) load volumes*/\
        "vld1.s32       {d3}, %[out]             \n"/* (2) unaligned load the output*/\
        "vpadd.s32      d0, d0, d1               \n"/* (1) add all 4 partial sums from q0*/\
        "vpadd.s32      d8, d8, d9               \n"/* (1) add all 4 partial sums from q4*/\
        "vpadd.s32      d0, d0, d8               \n"/* (1+4d) combine into L/R*/\
        "vqrdmulh.s32   d0, d0, d2               \n"/* (2+3d) apply volume*/\
        "vqadd.s32      d3, d3, d0               \n"/* (1+4d) accumulate result (saturating)*/\
        "vst1.s32       {d3}, %[out]             \n"/* (2+2d)store result*/
+
/*
 * NEON locked-phase kernel: mono, STRIDE 16, int16_t coefficients.
 * Consumes 8 taps per loop iteration, so 'count' must be a positive
 * multiple of 8 (the loop runs "subs #8 / bne").  sP is first rewound so
 * the 8-sample positive window can be loaded forward and then reversed.
 */
template <>
inline void ProcessL<1, 16>(int32_t* const out,
        int count,
        const int16_t* coefsP,
        const int16_t* coefsN,
        const int16_t* sP,
        const int16_t* sN,
        const int32_t* const volumeLR)
{
    const int CHANNELS = 1; // template specialization does not preserve params
    const int STRIDE = 16;
    sP -= CHANNELS*((STRIDE>>1)-1);
    asm (
        "veor           q0, q0, q0               \n"// (0 - combines+) accumulator = 0

        "1:                                      \n"

        "vld1.16        {q2}, [%[sP]]            \n"// (2+0d) load 8 16-bits mono samples
        "vld1.16        {q3}, [%[sN]]!           \n"// (2) load 8 16-bits mono samples
        "vld1.16        {q8}, [%[coefsP0]:128]!  \n"// (1) load 8 16-bits coefs
        "vld1.16        {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs

        "vrev64.16      q2, q2                   \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4

        // reordering the vmlal to do d6, d7 before d4, d5 is slower(?)
        "vmlal.s16      q0, d4, d17              \n"// (1+0d) multiply (reversed)samples by coef
        "vmlal.s16      q0, d5, d16              \n"// (1) multiply (reversed)samples by coef
        "vmlal.s16      q0, d6, d20              \n"// (1) multiply neg samples
        "vmlal.s16      q0, d7, d21              \n"// (1) multiply neg samples

        // moving these ARM instructions before neon above seems to be slower
        "subs           %[count], %[count], #8   \n"// (1) update loop counter
        "sub            %[sP], %[sP], #16        \n"// (0) move pointer to next set of samples

        // sP used after branch (warning)
        "bne            1b                       \n"// loop

         ASSEMBLY_ACCUMULATE_MONO

        : [out]     "=Uv" (out[0]),
          [count]   "+r" (count),
          [coefsP0] "+r" (coefsP),
          [coefsN0] "+r" (coefsN),
          [sP]      "+r" (sP),
          [sN]      "+r" (sN)
        : [vLR]     "r" (volumeLR)
        : "cc", "memory",
          "q0", "q1", "q2", "q3",
          "q8", "q10"
    );
}
+
/*
 * NEON locked-phase kernel: stereo, STRIDE 16, int16_t coefficients.
 * vld2 deinterleaves 8 frames into left (q2/q5) and right (q3/q6) lanes;
 * 'count' must be a positive multiple of 8.
 */
template <>
inline void ProcessL<2, 16>(int32_t* const out,
        int count,
        const int16_t* coefsP,
        const int16_t* coefsN,
        const int16_t* sP,
        const int16_t* sN,
        const int32_t* const volumeLR)
{
    const int CHANNELS = 2; // template specialization does not preserve params
    const int STRIDE = 16;
    sP -= CHANNELS*((STRIDE>>1)-1);
    asm (
        "veor           q0, q0, q0               \n"// (1) acc_L = 0
        "veor           q4, q4, q4               \n"// (0 combines+) acc_R = 0

        "1:                                      \n"

        "vld2.16        {q2, q3}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo samples
        "vld2.16        {q5, q6}, [%[sN]]!       \n"// (3) load 8 16-bits stereo samples
        "vld1.16        {q8}, [%[coefsP0]:128]!  \n"// (1) load 8 16-bits coefs
        "vld1.16        {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs

        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
        "vrev64.16      q3, q3                   \n"// (0 combines+) reverse right positive

        "vmlal.s16      q0, d4, d17              \n"// (1) multiply (reversed) samples left
        "vmlal.s16      q0, d5, d16              \n"// (1) multiply (reversed) samples left
        "vmlal.s16      q4, d6, d17              \n"// (1) multiply (reversed) samples right
        "vmlal.s16      q4, d7, d16              \n"// (1) multiply (reversed) samples right
        "vmlal.s16      q0, d10, d20             \n"// (1) multiply samples left
        "vmlal.s16      q0, d11, d21             \n"// (1) multiply samples left
        "vmlal.s16      q4, d12, d20             \n"// (1) multiply samples right
        "vmlal.s16      q4, d13, d21             \n"// (1) multiply samples right

        // moving these ARM before neon seems to be slower
        "subs           %[count], %[count], #8   \n"// (1) update loop counter
        "sub            %[sP], %[sP], #32        \n"// (0) move pointer to next set of samples

        // sP used after branch (warning)
        "bne            1b                       \n"// loop

        ASSEMBLY_ACCUMULATE_STEREO

        : [out] "=Uv" (out[0]),
          [count] "+r" (count),
          [coefsP0] "+r" (coefsP),
          [coefsN0] "+r" (coefsN),
          [sP] "+r" (sP),
          [sN] "+r" (sN)
        : [vLR] "r" (volumeLR)
        : "cc", "memory",
          "q0", "q1", "q2", "q3",
          "q4", "q5", "q6",
          "q8", "q10"
     );
}
+
/*
 * NEON interpolated-phase kernel: mono, STRIDE 16, int16_t coefficients.
 * Each coefficient is interpolated on the fly between the current
 * polyphase (coefsP0/coefsN0) and the next (coefsP1/coefsN1) using the
 * Q15 fraction in lerpP (vqrdmulh).  'count' must be a positive multiple
 * of 8.
 */
template <>
inline void Process<1, 16>(int32_t* const out,
        int count,
        const int16_t* coefsP,
        const int16_t* coefsN,
        const int16_t* coefsP1,
        const int16_t* coefsN1,
        const int16_t* sP,
        const int16_t* sN,
        uint32_t lerpP,
        const int32_t* const volumeLR)
{
    const int CHANNELS = 1; // template specialization does not preserve params
    const int STRIDE = 16;
    sP -= CHANNELS*((STRIDE>>1)-1);
    asm (
        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase S32 Q15
        "veor           q0, q0, q0               \n"// (0 - combines+) accumulator = 0

        "1:                                      \n"

        "vld1.16        {q2}, [%[sP]]            \n"// (2+0d) load 8 16-bits mono samples
        "vld1.16        {q3}, [%[sN]]!           \n"// (2) load 8 16-bits mono samples
        "vld1.16        {q8}, [%[coefsP0]:128]!  \n"// (1) load 8 16-bits coefs
        "vld1.16        {q9}, [%[coefsP1]:128]!  \n"// (1) load 8 16-bits coefs for interpolation
        "vld1.16        {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
        "vld1.16        {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation

        "vsub.s16       q9, q9, q8               \n"// (1) interpolate (step1) 1st set of coefs
        "vsub.s16       q11, q11, q10            \n"// (1) interpolate (step1) 2nd set of coefs

        "vqrdmulh.s16   q9, q9, d2[0]            \n"// (2) interpolate (step2) 1st set of coefs
        "vqrdmulh.s16   q11, q11, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs

        "vrev64.16      q2, q2                   \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4

        "vadd.s16       q8, q8, q9               \n"// (1+2d) interpolate (step3) 1st set
        "vadd.s16       q10, q10, q11            \n"// (1+1d) interpolate (step3) 2nd set

        // reordering the vmlal to do d6, d7 before d4, d5 is slower(?)
        "vmlal.s16      q0, d4, d17              \n"// (1+0d) multiply reversed samples by coef
        "vmlal.s16      q0, d5, d16              \n"// (1) multiply reversed samples by coef
        "vmlal.s16      q0, d6, d20              \n"// (1) multiply neg samples
        "vmlal.s16      q0, d7, d21              \n"// (1) multiply neg samples

        // moving these ARM instructions before neon above seems to be slower
        "subs           %[count], %[count], #8   \n"// (1) update loop counter
        "sub            %[sP], %[sP], #16        \n"// (0) move pointer to next set of samples

        // sP used after branch (warning)
        "bne            1b                       \n"// loop

        ASSEMBLY_ACCUMULATE_MONO

        : [out]     "=Uv" (out[0]),
          [count]   "+r" (count),
          [coefsP0] "+r" (coefsP),
          [coefsN0] "+r" (coefsN),
          [coefsP1] "+r" (coefsP1),
          [coefsN1] "+r" (coefsN1),
          [sP]      "+r" (sP),
          [sN]      "+r" (sN)
        : [lerpP]   "r" (lerpP),
          [vLR]     "r" (volumeLR)
        : "cc", "memory",
          "q0", "q1", "q2", "q3",
          "q8", "q9", "q10", "q11"
    );
}
+
/*
 * NEON interpolated-phase kernel: stereo, STRIDE 16, int16_t coefficients.
 * Same on-the-fly Q15 coefficient interpolation as the mono variant, with
 * left samples deinterleaved into q2/q5 and right into q3/q6.  'count'
 * must be a positive multiple of 8.
 */
template <>
inline void Process<2, 16>(int32_t* const out,
        int count,
        const int16_t* coefsP,
        const int16_t* coefsN,
        const int16_t* coefsP1,
        const int16_t* coefsN1,
        const int16_t* sP,
        const int16_t* sN,
        uint32_t lerpP,
        const int32_t* const volumeLR)
{
    const int CHANNELS = 2; // template specialization does not preserve params
    const int STRIDE = 16;
    sP -= CHANNELS*((STRIDE>>1)-1);
    asm (
        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase
        "veor           q0, q0, q0               \n"// (1) acc_L = 0
        "veor           q4, q4, q4               \n"// (0 combines+) acc_R = 0

        "1:                                      \n"

        "vld2.16        {q2, q3}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo samples
        "vld2.16        {q5, q6}, [%[sN]]!       \n"// (3) load 8 16-bits stereo samples
        "vld1.16        {q8}, [%[coefsP0]:128]!  \n"// (1) load 8 16-bits coefs
        "vld1.16        {q9}, [%[coefsP1]:128]!  \n"// (1) load 8 16-bits coefs for interpolation
        "vld1.16        {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
        "vld1.16        {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation

        "vsub.s16       q9, q9, q8               \n"// (1) interpolate (step1) 1st set of coefs
        "vsub.s16       q11, q11, q10            \n"// (1) interpolate (step1) 2nd set of coefs

        "vqrdmulh.s16   q9, q9, d2[0]            \n"// (2) interpolate (step2) 1st set of coefs
        "vqrdmulh.s16   q11, q11, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs

        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
        "vrev64.16      q3, q3                   \n"// (1) reverse 8 frames of the right positive

        "vadd.s16       q8, q8, q9               \n"// (1+1d) interpolate (step3) 1st set
        "vadd.s16       q10, q10, q11            \n"// (1+1d) interpolate (step3) 2nd set

        "vmlal.s16      q0, d4, d17              \n"// (1) multiply reversed samples left
        "vmlal.s16      q0, d5, d16              \n"// (1) multiply reversed samples left
        "vmlal.s16      q4, d6, d17              \n"// (1) multiply reversed samples right
        "vmlal.s16      q4, d7, d16              \n"// (1) multiply reversed samples right
        "vmlal.s16      q0, d10, d20             \n"// (1) multiply samples left
        "vmlal.s16      q0, d11, d21             \n"// (1) multiply samples left
        "vmlal.s16      q4, d12, d20             \n"// (1) multiply samples right
        "vmlal.s16      q4, d13, d21             \n"// (1) multiply samples right

        // moving these ARM before neon seems to be slower
        "subs           %[count], %[count], #8   \n"// (1) update loop counter
        "sub            %[sP], %[sP], #32        \n"// (0) move pointer to next set of samples

        // sP used after branch (warning)
        "bne            1b                       \n"// loop

        ASSEMBLY_ACCUMULATE_STEREO

        : [out] "=Uv" (out[0]),
          [count] "+r" (count),
          [coefsP0] "+r" (coefsP),
          [coefsN0] "+r" (coefsN),
          [coefsP1] "+r" (coefsP1),
          [coefsN1] "+r" (coefsN1),
          [sP] "+r" (sP),
          [sN] "+r" (sN)
        : [lerpP]   "r" (lerpP),
          [vLR] "r" (volumeLR)
        : "cc", "memory",
          "q0", "q1", "q2", "q3",
          "q4", "q5", "q6",
          "q8", "q9", "q10", "q11"
    );
}
+
/*
 * NEON locked-phase kernel: mono, STRIDE 16, int32_t coefficients.
 * Samples are widened to 31 bits (vshll #15) so the 32x32 vqrdmulh keeps
 * the products in range; no coefficient interpolation happens here (the
 * locked path uses the polyphase table directly).  'count' must be a
 * positive multiple of 8.
 */
template <>
inline void ProcessL<1, 16>(int32_t* const out,
        int count,
        const int32_t* coefsP,
        const int32_t* coefsN,
        const int16_t* sP,
        const int16_t* sN,
        const int32_t* const volumeLR)
{
    const int CHANNELS = 1; // template specialization does not preserve params
    const int STRIDE = 16;
    sP -= CHANNELS*((STRIDE>>1)-1);
    asm (
        "veor           q0, q0, q0                    \n"// result, initialize to 0

        "1:                                           \n"

        "vld1.16        {q2}, [%[sP]]                 \n"// load 8 16-bits mono samples
        "vld1.16        {q3}, [%[sN]]!                \n"// load 8 16-bits mono samples
        "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
        "vld1.32        {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs

        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side

        "vshll.s16      q12, d4, #15                  \n"// extend samples to 31 bits
        "vshll.s16      q13, d5, #15                  \n"// extend samples to 31 bits

        "vshll.s16      q14, d6, #15                  \n"// extend samples to 31 bits
        "vshll.s16      q15, d7, #15                  \n"// extend samples to 31 bits

        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by coef
        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by coef
        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by coef
        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by coef

        "vadd.s32       q0, q0, q12                   \n"// accumulate result
        "vadd.s32       q13, q13, q14                 \n"// accumulate result
        "vadd.s32       q0, q0, q15                   \n"// accumulate result
        "vadd.s32       q0, q0, q13                   \n"// accumulate result

        "sub            %[sP], %[sP], #16             \n"// move pointer to next set of samples
        "subs           %[count], %[count], #8        \n"// update loop counter

        "bne            1b                            \n"// loop

        ASSEMBLY_ACCUMULATE_MONO

        : [out]     "=Uv" (out[0]),
          [count]   "+r" (count),
          [coefsP0] "+r" (coefsP),
          [coefsN0] "+r" (coefsN),
          [sP]      "+r" (sP),
          [sN]      "+r" (sN)
        : [vLR]     "r" (volumeLR)
        : "cc", "memory",
          "q0", "q1", "q2", "q3",
          "q8", "q9", "q10", "q11",
          "q12", "q13", "q14", "q15"
    );
}
+
/*
 * NEON locked-phase kernel: stereo, STRIDE 16, int32_t coefficients.
 * Left channel is accumulated in q0, right in q4; each channel's samples
 * are widened to 31 bits before the 32x32 vqrdmulh.  'count' must be a
 * positive multiple of 8.
 */
template <>
inline void ProcessL<2, 16>(int32_t* const out,
        int count,
        const int32_t* coefsP,
        const int32_t* coefsN,
        const int16_t* sP,
        const int16_t* sN,
        const int32_t* const volumeLR)
{
    const int CHANNELS = 2; // template specialization does not preserve params
    const int STRIDE = 16;
    sP -= CHANNELS*((STRIDE>>1)-1);
    asm (
        "veor           q0, q0, q0                    \n"// result, initialize to 0
        "veor           q4, q4, q4                    \n"// result, initialize to 0

        "1:                                           \n"

        "vld2.16        {q2, q3}, [%[sP]]             \n"// load 8 16-bits stereo samples
        "vld2.16        {q5, q6}, [%[sN]]!            \n"// load 8 16-bits stereo samples
        "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
        "vld1.32        {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs

        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
        "vrev64.16      q3, q3                        \n"// reverse 8 frames of the positive side

        "vshll.s16      q12,  d4, #15                 \n"// extend samples to 31 bits
        "vshll.s16      q13,  d5, #15                 \n"// extend samples to 31 bits

        "vshll.s16      q14,  d10, #15                \n"// extend samples to 31 bits
        "vshll.s16      q15,  d11, #15                \n"// extend samples to 31 bits

        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by coef
        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by coef
        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by coef
        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by coef

        "vadd.s32       q0, q0, q12                   \n"// accumulate result
        "vadd.s32       q13, q13, q14                 \n"// accumulate result
        "vadd.s32       q0, q0, q15                   \n"// (+1) accumulate result
        "vadd.s32       q0, q0, q13                   \n"// (+1) accumulate result

        "vshll.s16      q12,  d6, #15                 \n"// extend samples to 31 bits
        "vshll.s16      q13,  d7, #15                 \n"// extend samples to 31 bits

        "vshll.s16      q14,  d12, #15                \n"// extend samples to 31 bits
        "vshll.s16      q15,  d13, #15                \n"// extend samples to 31 bits

        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by coef
        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by coef
        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by coef
        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by coef

        "vadd.s32       q4, q4, q12                   \n"// accumulate result
        "vadd.s32       q13, q13, q14                 \n"// accumulate result
        "vadd.s32       q4, q4, q15                   \n"// (+1) accumulate result
        "vadd.s32       q4, q4, q13                   \n"// (+1) accumulate result

        "subs           %[count], %[count], #8        \n"// update loop counter
        "sub            %[sP], %[sP], #32             \n"// move pointer to next set of samples

        "bne            1b                            \n"// loop

        ASSEMBLY_ACCUMULATE_STEREO

        : [out]     "=Uv" (out[0]),
          [count]   "+r" (count),
          [coefsP0] "+r" (coefsP),
          [coefsN0] "+r" (coefsN),
          [sP]      "+r" (sP),
          [sN]      "+r" (sN)
        : [vLR]     "r" (volumeLR)
        : "cc", "memory",
          "q0", "q1", "q2", "q3",
          "q4", "q5", "q6",
          "q8", "q9", "q10", "q11",
          "q12", "q13", "q14", "q15"
    );
}
+
+template <> // Mono, stride 16, 32-bit coefs: FIR with linear interpolation between the two coef sets (phase fraction in lerpP).
+inline void Process<1, 16>(int32_t* const out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int32_t* coefsP1,
+        const int32_t* coefsN1,
+        const int16_t* sP,
+        const int16_t* sN,
+        uint32_t lerpP,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 1; // template specialization does not preserve params
+    const int STRIDE = 16;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "vmov.32        d2[0], %[lerpP]               \n"// load the positive phase fraction into lane d2[0]
+        "veor           q0, q0, q0                    \n"// result, initialize to 0
+
+        "1:                                           \n"
+
+        "vld1.16        {q2}, [%[sP]]                 \n"// load 8 16-bits mono samples
+        "vld1.16        {q3}, [%[sN]]!                \n"// load 8 16-bits mono samples
+        "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
+        "vld1.32        {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs for interpolation
+        "vld1.32        {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
+        "vld1.32        {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs for interpolation
+
+        "vsub.s32       q12, q12, q8                  \n"// interpolate (step1) delta = coefP1 - coefP0
+        "vsub.s32       q13, q13, q9                  \n"// interpolate (step1)
+        "vsub.s32       q14, q14, q10                 \n"// interpolate (step1) delta = coefN0 - coefN1
+        "vsub.s32       q15, q15, q11                 \n"// interpolate (step1)
+
+        "vqrdmulh.s32   q12, q12, d2[0]               \n"// interpolate (step2) delta * phase
+        "vqrdmulh.s32   q13, q13, d2[0]               \n"// interpolate (step2)
+        "vqrdmulh.s32   q14, q14, d2[0]               \n"// interpolate (step2)
+        "vqrdmulh.s32   q15, q15, d2[0]               \n"// interpolate (step2)
+
+        "vadd.s32       q8, q8, q12                   \n"// interpolate (step3) base + scaled delta
+        "vadd.s32       q9, q9, q13                   \n"// interpolate (step3)
+        "vadd.s32       q10, q10, q14                 \n"// interpolate (step3)
+        "vadd.s32       q11, q11, q15                 \n"// interpolate (step3)
+
+        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
+
+        "vshll.s16      q12,  d4, #15                 \n"// extend samples to 31 bits
+        "vshll.s16      q13,  d5, #15                 \n"// extend samples to 31 bits
+
+        "vshll.s16      q14,  d6, #15                 \n"// extend samples to 31 bits
+        "vshll.s16      q15,  d7, #15                 \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by interpolated coef
+
+        "vadd.s32       q0, q0, q12                   \n"// accumulate result
+        "vadd.s32       q13, q13, q14                 \n"// partial sum q13+q14, folded into q0 below
+        "vadd.s32       q0, q0, q15                   \n"// accumulate result
+        "vadd.s32       q0, q0, q13                   \n"// accumulate partial sum
+
+        "sub            %[sP], %[sP], #16             \n"// move pointer to next set of samples
+        "subs           %[count], %[count], #8        \n"// update loop counter
+
+        "bne            1b                            \n"// loop
+
+        ASSEMBLY_ACCUMULATE_MONO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [coefsP1] "+r" (coefsP1),
+          [coefsN1] "+r" (coefsN1),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [lerpP]   "r" (lerpP),
+          [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q8", "q9", "q10", "q11",
+          "q12", "q13", "q14", "q15"
+    );
+}
+
+template <> // Stereo, stride 16, 32-bit coefs: FIR with linear interpolation between the two coef sets (phase fraction in lerpP).
+inline void Process<2, 16>(int32_t* const out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int32_t* coefsP1,
+        const int32_t* coefsN1,
+        const int16_t* sP,
+        const int16_t* sN,
+        uint32_t lerpP,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 2; // template specialization does not preserve params
+    const int STRIDE = 16;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "vmov.32        d2[0], %[lerpP]               \n"// load the positive phase
+        "veor           q0, q0, q0                    \n"// left-channel accumulator = 0
+        "veor           q4, q4, q4                    \n"// right-channel accumulator = 0
+
+        "1:                                           \n"
+
+        "vld2.16        {q2, q3}, [%[sP]]             \n"// load 8 16-bit stereo frames; de-interleave: left in q2, right in q3
+        "vld2.16        {q5, q6}, [%[sN]]!            \n"// load 8 16-bit stereo frames; left in q5, right in q6
+        "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
+        "vld1.32        {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs for interpolation
+        "vld1.32        {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
+        "vld1.32        {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs for interpolation
+
+        "vsub.s32       q12, q12, q8                  \n"// interpolate (step1)
+        "vsub.s32       q13, q13, q9                  \n"// interpolate (step1)
+        "vsub.s32       q14, q14, q10                 \n"// interpolate (step1)
+        "vsub.s32       q15, q15, q11                 \n"// interpolate (step1)
+
+        "vqrdmulh.s32   q12, q12, d2[0]               \n"// interpolate (step2)
+        "vqrdmulh.s32   q13, q13, d2[0]               \n"// interpolate (step2)
+        "vqrdmulh.s32   q14, q14, d2[0]               \n"// interpolate (step2)
+        "vqrdmulh.s32   q15, q15, d2[0]               \n"// interpolate (step2)
+
+        "vadd.s32       q8, q8, q12                   \n"// interpolate (step3)
+        "vadd.s32       q9, q9, q13                   \n"// interpolate (step3)
+        "vadd.s32       q10, q10, q14                 \n"// interpolate (step3)
+        "vadd.s32       q11, q11, q15                 \n"// interpolate (step3)
+
+        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
+        "vrev64.16      q3, q3                        \n"// reverse 8 frames of the positive side
+
+        "vshll.s16      q12,  d4, #15                 \n"// extend samples to 31 bits
+        "vshll.s16      q13,  d5, #15                 \n"// extend samples to 31 bits
+
+        "vshll.s16      q14,  d10, #15                \n"// extend samples to 31 bits
+        "vshll.s16      q15,  d11, #15                \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by interpolated coef
+
+        "vadd.s32       q0, q0, q12                   \n"// accumulate result (left)
+        "vadd.s32       q13, q13, q14                 \n"// partial sum q13+q14, folded into q0 below
+        "vadd.s32       q0, q0, q15                   \n"// (+1) accumulate result
+        "vadd.s32       q0, q0, q13                   \n"// (+1) accumulate result
+
+        "vshll.s16      q12,  d6, #15                 \n"// extend samples to 31 bits
+        "vshll.s16      q13,  d7, #15                 \n"// extend samples to 31 bits
+
+        "vshll.s16      q14,  d12, #15                \n"// extend samples to 31 bits
+        "vshll.s16      q15,  d13, #15                \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by interpolated coef
+
+        "vadd.s32       q4, q4, q12                   \n"// accumulate result (right)
+        "vadd.s32       q13, q13, q14                 \n"// partial sum q13+q14, folded into q4 below
+        "vadd.s32       q4, q4, q15                   \n"// (+1) accumulate result
+        "vadd.s32       q4, q4, q13                   \n"// (+1) accumulate result
+
+        "subs           %[count], %[count], #8        \n"// update loop counter
+        "sub            %[sP], %[sP], #32             \n"// move pointer to next set of samples
+
+        "bne            1b                            \n"// loop
+
+        ASSEMBLY_ACCUMULATE_STEREO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [coefsP1] "+r" (coefsP1),
+          [coefsN1] "+r" (coefsN1),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [lerpP]   "r" (lerpP),
+          [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q4", "q5", "q6",
+          "q8", "q9", "q10", "q11",
+          "q12", "q13", "q14", "q15"
+    );
+}
+
+template <> // Mono, stride 8, 16-bit coefs: straight (non-interpolated) FIR accumulation.
+inline void ProcessL<1, 8>(int32_t* const out,
+        int count,
+        const int16_t* coefsP,
+        const int16_t* coefsN,
+        const int16_t* sP,
+        const int16_t* sN,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 1; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "veor           q0, q0, q0               \n"// (0 - combines+) accumulator = 0
+
+        "1:                                      \n"
+
+        "vld1.16        {d4}, [%[sP]]            \n"// (2+0d) load 4 16-bits mono samples
+        "vld1.16        {d6}, [%[sN]]!           \n"// (2) load 4 16-bits mono samples
+        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 4 16-bits coefs
+        "vld1.16        {d20}, [%[coefsN0]:64]!  \n"// (1) load 4 16-bits coefs
+
+        "vrev64.16      d4, d4                   \n"// (1) reversed s3, s2, s1, s0, s7, s6, s5, s4
+
+        // reordering the vmlal to do d6, d7 before d4, d5 is slower(?)
+        "vmlal.s16      q0, d4, d16              \n"// (1) multiply (reversed)samples by coef
+        "vmlal.s16      q0, d6, d20              \n"// (1) multiply neg samples
+
+        // moving these ARM instructions before neon above seems to be slower
+        "subs           %[count], %[count], #4   \n"// (1) update loop counter
+        "sub            %[sP], %[sP], #8         \n"// (0) move pointer to next set of samples
+
+        // sP used after branch (warning)
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_MONO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q8", "q10"
+    );
+}
+
+template <> // Stereo, stride 8, 16-bit coefs: straight (non-interpolated) FIR accumulation.
+inline void ProcessL<2, 8>(int32_t* const out,
+        int count,
+        const int16_t* coefsP,
+        const int16_t* coefsN,
+        const int16_t* sP,
+        const int16_t* sN,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 2; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "veor           q0, q0, q0               \n"// (1) acc_L = 0
+        "veor           q4, q4, q4               \n"// (0 combines+) acc_R = 0
+
+        "1:                                      \n"
+
+        "vld2.16        {d4, d5}, [%[sP]]        \n"// (2+0d) load 8 16-bits stereo samples; vld2 de-interleaves: left in d4, right in d5
+        "vld2.16        {d6, d7}, [%[sN]]!       \n"// (2) load 8 16-bits stereo samples; left in d6, right in d7
+        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 8 16-bits coefs
+        "vld1.16        {d20}, [%[coefsN0]:64]!  \n"// (1) load 8 16-bits coefs
+
+        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
+
+        "vmlal.s16      q0, d4, d16              \n"// (1) multiply (reversed) samples left
+        "vmlal.s16      q4, d5, d16              \n"// (1) multiply (reversed) samples right
+        "vmlal.s16      q0, d6, d20              \n"// (1) multiply samples left
+        "vmlal.s16      q4, d7, d20              \n"// (1) multiply samples right
+
+        // moving these ARM before neon seems to be slower
+        "subs           %[count], %[count], #4   \n"// (1) update loop counter
+        "sub            %[sP], %[sP], #16        \n"// (0) move pointer to next set of samples
+
+        // sP used after branch (warning)
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_STEREO
+
+        : [out] "=Uv" (out[0]),
+          [count] "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [sP] "+r" (sP),
+          [sN] "+r" (sN)
+        : [vLR] "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q4", "q5", "q6",
+          "q8", "q10"
+     );
+}
+
+template <> // Mono, stride 8, 16-bit coefs: FIR with linear interpolation between the two coef sets (phase fraction in lerpP).
+inline void Process<1, 8>(int32_t* const out,
+        int count,
+        const int16_t* coefsP,
+        const int16_t* coefsN,
+        const int16_t* coefsP1,
+        const int16_t* coefsN1,
+        const int16_t* sP,
+        const int16_t* sN,
+        uint32_t lerpP,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 1; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase S32 Q15
+        "veor           q0, q0, q0               \n"// (0 - combines+) accumulator = 0
+
+        "1:                                      \n"
+
+        "vld1.16        {d4}, [%[sP]]            \n"// (2+0d) load 4 16-bits mono samples
+        "vld1.16        {d6}, [%[sN]]!           \n"// (2) load 4 16-bits mono samples
+        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 4 16-bits coefs
+        "vld1.16        {d17}, [%[coefsP1]:64]!  \n"// (1) load 4 16-bits coefs for interpolation
+        "vld1.16        {d20}, [%[coefsN1]:64]!  \n"// (1) load 4 16-bits coefs
+        "vld1.16        {d21}, [%[coefsN0]:64]!  \n"// (1) load 4 16-bits coefs for interpolation
+
+        "vsub.s16       d17, d17, d16            \n"// (1) interpolate (step1) 1st set of coefs
+        "vsub.s16       d21, d21, d20            \n"// (1) interpolate (step1) 2nd set of coefs
+
+        "vqrdmulh.s16   d17, d17, d2[0]          \n"// (2) interpolate (step2) 1st set of coefs
+        "vqrdmulh.s16   d21, d21, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs
+
+        "vrev64.16      d4, d4                   \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4
+
+        "vadd.s16       d16, d16, d17            \n"// (1+2d) interpolate (step3) 1st set
+        "vadd.s16       d20, d20, d21            \n"// (1+1d) interpolate (step3) 2nd set
+
+        // reordering the vmlal to do d6, d7 before d4, d5 is slower(?)
+        "vmlal.s16      q0, d4, d16              \n"// (1+0d) multiply (reversed)by coef
+        "vmlal.s16      q0, d6, d20              \n"// (1) multiply neg samples
+
+        // moving these ARM instructions before neon above seems to be slower
+        "subs           %[count], %[count], #4   \n"// (1) update loop counter
+        "sub            %[sP], %[sP], #8        \n"// move pointer to next set of samples
+
+        // sP used after branch (warning)
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_MONO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [coefsP1] "+r" (coefsP1),
+          [coefsN1] "+r" (coefsN1),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [lerpP]   "r" (lerpP),
+          [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q8", "q9", "q10", "q11"
+    );
+}
+
+template <> // Stereo, stride 8, 16-bit coefs: FIR with linear interpolation between the two coef sets (phase fraction in lerpP).
+inline void Process<2, 8>(int32_t* const out,
+        int count,
+        const int16_t* coefsP,
+        const int16_t* coefsN,
+        const int16_t* coefsP1,
+        const int16_t* coefsN1,
+        const int16_t* sP,
+        const int16_t* sN,
+        uint32_t lerpP,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 2; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase
+        "veor           q0, q0, q0               \n"// (1) acc_L = 0
+        "veor           q4, q4, q4               \n"// (0 combines+) acc_R = 0
+
+        "1:                                      \n"
+
+        "vld2.16        {d4, d5}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo samples; vld2 de-interleaves: left in d4, right in d5
+        "vld2.16        {d6, d7}, [%[sN]]!       \n"// (3) load 8 16-bits stereo samples; left in d6, right in d7
+        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 8 16-bits coefs
+        "vld1.16        {d17}, [%[coefsP1]:64]!  \n"// (1) load 8 16-bits coefs for interpolation
+        "vld1.16        {d20}, [%[coefsN1]:64]!  \n"// (1) load 8 16-bits coefs
+        "vld1.16        {d21}, [%[coefsN0]:64]!  \n"// (1) load 8 16-bits coefs for interpolation
+
+        "vsub.s16       d17, d17, d16            \n"// (1) interpolate (step1) 1st set of coefs
+        "vsub.s16       d21, d21, d20            \n"// (1) interpolate (step1) 2nd set of coefs
+
+        "vqrdmulh.s16   d17, d17, d2[0]          \n"// (2) interpolate (step2) 1st set of coefs
+        "vqrdmulh.s16   d21, d21, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs
+
+        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
+
+        "vadd.s16       d16, d16, d17            \n"// (1+1d) interpolate (step3) 1st set
+        "vadd.s16       d20, d20, d21            \n"// (1+1d) interpolate (step3) 2nd set
+
+        "vmlal.s16      q0, d4, d16              \n"// (1) multiply (reversed) samples left
+        "vmlal.s16      q4, d5, d16              \n"// (1) multiply (reversed) samples right
+        "vmlal.s16      q0, d6, d20              \n"// (1) multiply samples left
+        "vmlal.s16      q4, d7, d20              \n"// (1) multiply samples right
+
+        // moving these ARM before neon seems to be slower
+        "subs           %[count], %[count], #4   \n"// (1) update loop counter
+        "sub            %[sP], %[sP], #16        \n"// move pointer to next set of samples
+
+        // sP used after branch (warning)
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_STEREO
+
+        : [out] "=Uv" (out[0]),
+          [count] "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [coefsP1] "+r" (coefsP1),
+          [coefsN1] "+r" (coefsN1),
+          [sP] "+r" (sP),
+          [sN] "+r" (sN)
+        : [lerpP]   "r" (lerpP),
+          [vLR] "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q4", "q5", "q6",
+          "q8", "q9", "q10", "q11"
+    );
+}
+
+template <> // Mono, stride 8, 32-bit coefs: straight (non-interpolated) FIR accumulation.
+inline void ProcessL<1, 8>(int32_t* const out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int16_t* sP,
+        const int16_t* sN,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 1; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "veor           q0, q0, q0               \n"// result, initialize to 0
+
+        "1:                                      \n"
+
+        "vld1.16        {d4}, [%[sP]]            \n"// load 4 16-bits mono samples
+        "vld1.16        {d6}, [%[sN]]!           \n"// load 4 16-bits mono samples
+        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
+        "vld1.32        {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs
+
+        "vrev64.16      d4, d4                   \n"// reverse the 4 positive-side samples
+
+        "vshll.s16      q12, d4, #15             \n"// (stall) extend samples to 31 bits
+        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by coef
+        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by coef
+
+        "vadd.s32       q0, q0, q12              \n"// accumulate result
+        "vadd.s32       q0, q0, q14              \n"// (stall) accumulate result
+
+        "subs           %[count], %[count], #4   \n"// update loop counter
+        "sub            %[sP], %[sP], #8         \n"// move pointer to next set of samples
+
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_MONO
+
+        : [out] "=Uv" (out[0]),
+          [count] "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [sP] "+r" (sP),
+          [sN] "+r" (sN)
+        : [vLR] "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q8", "q9", "q10", "q11",
+          "q12", "q14"
+    );
+}
+
+template <> // Stereo, stride 8, 32-bit coefs: straight (non-interpolated) FIR accumulation.
+inline void ProcessL<2, 8>(int32_t* const out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int16_t* sP,
+        const int16_t* sN,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 2; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "veor           q0, q0, q0               \n"// left-channel accumulator = 0
+        "veor           q4, q4, q4               \n"// right-channel accumulator = 0
+
+        "1:                                      \n"
+
+        "vld2.16        {d4, d5}, [%[sP]]        \n"// load 4 16-bits stereo frames; vld2 de-interleaves: left in d4, right in d5
+        "vld2.16        {d6, d7}, [%[sN]]!       \n"// load 4 16-bits stereo frames; left in d6, right in d7
+        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
+        "vld1.32        {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs
+
+        "vrev64.16      q2, q2                   \n"// reverse the 4 positive-side frames (per channel)
+
+        "vshll.s16      q12, d4, #15             \n"// extend samples to 31 bits
+        "vshll.s16      q13, d5, #15             \n"// extend samples to 31 bits
+
+        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
+        "vshll.s16      q15, d7, #15             \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by coef
+        "vqrdmulh.s32   q13, q13, q8             \n"// multiply samples by coef
+        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by coef
+        "vqrdmulh.s32   q15, q15, q10            \n"// multiply samples by coef
+
+        "vadd.s32       q0, q0, q12              \n"// accumulate result (left)
+        "vadd.s32       q4, q4, q13              \n"// accumulate result (right)
+        "vadd.s32       q0, q0, q14              \n"// accumulate result
+        "vadd.s32       q4, q4, q15              \n"// accumulate result
+
+        "subs           %[count], %[count], #4   \n"// update loop counter
+        "sub            %[sP], %[sP], #16        \n"// move pointer to next set of samples
+
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_STEREO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsN0] "+r" (coefsN),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3", "q4",
+          "q8", "q9", "q10", "q11",
+          "q12", "q13", "q14", "q15"
+    );
+}
+
+template <> // Mono, stride 8, 32-bit coefs: FIR with linear interpolation between the two coef sets (phase fraction in lerpP).
+inline void Process<1, 8>(int32_t* const out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int32_t* coefsP1,
+        const int32_t* coefsN1,
+        const int16_t* sP,
+        const int16_t* sN,
+        uint32_t lerpP,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 1; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase
+        "veor           q0, q0, q0               \n"// result, initialize to 0
+
+        "1:                                      \n"
+
+        "vld1.16        {d4}, [%[sP]]            \n"// load 4 16-bits mono samples
+        "vld1.16        {d6}, [%[sN]]!           \n"// load 4 16-bits mono samples
+        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
+        "vld1.32        {q9}, [%[coefsP1]:128]!  \n"// load 4 32-bits coefs for interpolation
+        "vld1.32        {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs
+        "vld1.32        {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation
+
+        "vrev64.16      d4, d4                   \n"// reverse the 4 positive-side samples
+
+        "vsub.s32       q9, q9, q8               \n"// interpolate (step1) 1st set of coefs
+        "vsub.s32       q11, q11, q10            \n"// interpolate (step1) 2nd set of coefs
+        "vshll.s16      q12, d4, #15             \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q9, q9, d2[0]            \n"// interpolate (step2) 1st set of coefs
+        "vqrdmulh.s32   q11, q11, d2[0]          \n"// interpolate (step2) 2nd set of coefs
+        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
+
+        "vadd.s32       q8, q8, q9               \n"// interpolate (step3) 1st set
+        "vadd.s32       q10, q10, q11            \n"// interpolate (step3) 2nd set
+
+        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by interpolated coef
+
+        "vadd.s32       q0, q0, q12              \n"// accumulate result
+        "vadd.s32       q0, q0, q14              \n"// accumulate result
+
+        "subs           %[count], %[count], #4   \n"// update loop counter
+        "sub            %[sP], %[sP], #8         \n"// move pointer to next set of samples
+
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_MONO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsP1] "+r" (coefsP1),
+          [coefsN0] "+r" (coefsN),
+          [coefsN1] "+r" (coefsN1),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [lerpP]   "r" (lerpP),
+          [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3",
+          "q8", "q9", "q10", "q11",
+          "q12", "q14"
+    );
+}
+
+template <> // Stereo, stride 8, 32-bit coefs: FIR with linear interpolation between the two coef sets (phase fraction in lerpP).
+inline
+void Process<2, 8>(int32_t* const out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int32_t* coefsP1,
+        const int32_t* coefsN1,
+        const int16_t* sP,
+        const int16_t* sN,
+        uint32_t lerpP,
+        const int32_t* const volumeLR)
+{
+    const int CHANNELS = 2; // template specialization does not preserve params
+    const int STRIDE = 8;
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    asm (
+        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase (only lane d2[0] is defined)
+        "veor           q0, q0, q0               \n"// left-channel accumulator = 0
+        "veor           q4, q4, q4               \n"// right-channel accumulator = 0
+
+        "1:                                      \n"
+        "vld2.16        {d4, d5}, [%[sP]]        \n"// load 4 16-bits stereo frames; left in d4, right in d5
+        "vld2.16        {d6, d7}, [%[sN]]!       \n"// load 4 16-bits stereo frames; left in d6, right in d7
+        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
+        "vld1.32        {q9}, [%[coefsP1]:128]!  \n"// load 4 32-bits coefs for interpolation
+        "vld1.32        {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs
+        "vld1.32        {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation
+
+        "vrev64.16      q2, q2                   \n"// (reversed) 2 frames of the positive side
+
+        "vsub.s32       q9, q9, q8               \n"// interpolate (step1) 1st set of coefs
+        "vsub.s32       q11, q11, q10            \n"// interpolate (step1) 2nd set of coefs
+        "vshll.s16      q12, d4, #15             \n"// extend samples to 31 bits
+        "vshll.s16      q13, d5, #15             \n"// extend samples to 31 bits
+
+        "vqrdmulh.s32   q9, q9, d2[0]            \n"// interpolate (step2) 1st set of coefs
+        "vqrdmulh.s32   q11, q11, d2[0]          \n"// interpolate (step2) 2nd set of coefs (d2[0]: the phase lane loaded above; d2[1] is undefined)
+        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
+        "vshll.s16      q15, d7, #15             \n"// extend samples to 31 bits
+
+        "vadd.s32       q8, q8, q9               \n"// interpolate (step3) 1st set
+        "vadd.s32       q10, q10, q11            \n"// interpolate (step3) 2nd set
+
+        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q13, q13, q8             \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q15, q15, q10            \n"// multiply samples by interpolated coef
+
+        "vadd.s32       q0, q0, q12              \n"// accumulate result (left)
+        "vadd.s32       q4, q4, q13              \n"// accumulate result (right)
+        "vadd.s32       q0, q0, q14              \n"// accumulate result
+        "vadd.s32       q4, q4, q15              \n"// accumulate result
+
+        "subs           %[count], %[count], #4   \n"// update loop counter
+        "sub            %[sP], %[sP], #16        \n"// move pointer to next set of samples
+
+        "bne            1b                       \n"// loop
+
+        ASSEMBLY_ACCUMULATE_STEREO
+
+        : [out]     "=Uv" (out[0]),
+          [count]   "+r" (count),
+          [coefsP0] "+r" (coefsP),
+          [coefsP1] "+r" (coefsP1),
+          [coefsN0] "+r" (coefsN),
+          [coefsN1] "+r" (coefsN1),
+          [sP]      "+r" (sP),
+          [sN]      "+r" (sN)
+        : [lerpP]   "r" (lerpP),
+          [vLR]     "r" (volumeLR)
+        : "cc", "memory",
+          "q0", "q1", "q2", "q3", "q4",
+          "q8", "q9", "q10", "q11",
+          "q12", "q13", "q14", "q15"
+    );
+}
+
+#endif //USE_NEON
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index e50b192..35553ef 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -543,7 +543,7 @@
     uint32_t phaseIncrement = mPhaseIncrement;
     size_t outputIndex = 0;
     size_t outputSampleCount = outFrameCount * 2;
-    size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
+    size_t inFrameCount = getInFrameCountRequired(outFrameCount);
 
     while (outputIndex < outputSampleCount) {
         // buffer is empty, fetch a new one
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
index 0754d9d..6a8aeb1 100644
--- a/services/audioflinger/Configuration.h
+++ b/services/audioflinger/Configuration.h
@@ -31,6 +31,7 @@
 
 // uncomment to enable fast mixer to take performance samples for later statistical analysis
 #define FAST_MIXER_STATISTICS
+// FIXME rename to FAST_THREAD_STATISTICS
 
 // uncomment for debugging timing problems related to StateQueue::push()
 //#define STATE_QUEUE_DUMP
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 010e233..29b56db 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -116,8 +116,9 @@
             continue;
         }
         // first non destroyed handle is considered in control
-        if (controlHandle == NULL)
+        if (controlHandle == NULL) {
             controlHandle = h;
+        }
         if (h->priority() <= priority) {
             break;
         }
@@ -804,7 +805,112 @@
     return mOffloaded;
 }
 
-void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args)
+String8 effectFlagsToString(uint32_t flags) {
+    String8 s;
+
+    s.append("conn. mode: ");
+    switch (flags & EFFECT_FLAG_TYPE_MASK) {
+    case EFFECT_FLAG_TYPE_INSERT: s.append("insert"); break;
+    case EFFECT_FLAG_TYPE_AUXILIARY: s.append("auxiliary"); break;
+    case EFFECT_FLAG_TYPE_REPLACE: s.append("replace"); break;
+    case EFFECT_FLAG_TYPE_PRE_PROC: s.append("preproc"); break;
+    case EFFECT_FLAG_TYPE_POST_PROC: s.append("postproc"); break;
+    default: s.append("unknown/reserved"); break;
+    }
+    s.append(", ");
+
+    s.append("insert pref: ");
+    switch (flags & EFFECT_FLAG_INSERT_MASK) {
+    case EFFECT_FLAG_INSERT_ANY: s.append("any"); break;
+    case EFFECT_FLAG_INSERT_FIRST: s.append("first"); break;
+    case EFFECT_FLAG_INSERT_LAST: s.append("last"); break;
+    case EFFECT_FLAG_INSERT_EXCLUSIVE: s.append("exclusive"); break;
+    default: s.append("unknown/reserved"); break;
+    }
+    s.append(", ");
+
+    s.append("volume mgmt: ");
+    switch (flags & EFFECT_FLAG_VOLUME_MASK) {
+    case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break;
+    case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break;
+    case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break;
+    default: s.append("unknown/reserved"); break;
+    }
+    s.append(", ");
+
+    uint32_t devind = flags & EFFECT_FLAG_DEVICE_MASK;
+    if (devind) {
+        s.append("device indication: ");
+        switch (devind) {
+        case EFFECT_FLAG_DEVICE_IND: s.append("requires updates"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    s.append("input mode: ");
+    switch (flags & EFFECT_FLAG_INPUT_MASK) {
+    case EFFECT_FLAG_INPUT_DIRECT: s.append("direct"); break;
+    case EFFECT_FLAG_INPUT_PROVIDER: s.append("provider"); break;
+    case EFFECT_FLAG_INPUT_BOTH: s.append("direct+provider"); break;
+    default: s.append("not set"); break;
+    }
+    s.append(", ");
+
+    s.append("output mode: ");
+    switch (flags & EFFECT_FLAG_OUTPUT_MASK) {
+    case EFFECT_FLAG_OUTPUT_DIRECT: s.append("direct"); break;
+    case EFFECT_FLAG_OUTPUT_PROVIDER: s.append("provider"); break;
+    case EFFECT_FLAG_OUTPUT_BOTH: s.append("direct+provider"); break;
+    default: s.append("not set"); break;
+    }
+    s.append(", ");
+
+    uint32_t accel = flags & EFFECT_FLAG_HW_ACC_MASK;
+    if (accel) {
+        s.append("hardware acceleration: ");
+        switch (accel) {
+        case EFFECT_FLAG_HW_ACC_SIMPLE: s.append("non-tunneled"); break;
+        case EFFECT_FLAG_HW_ACC_TUNNEL: s.append("tunneled"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    uint32_t modeind = flags & EFFECT_FLAG_AUDIO_MODE_MASK;
+    if (modeind) {
+        s.append("mode indication: ");
+        switch (modeind) {
+        case EFFECT_FLAG_AUDIO_MODE_IND: s.append("required"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    uint32_t srcind = flags & EFFECT_FLAG_AUDIO_SOURCE_MASK;
+    if (srcind) {
+        s.append("source indication: ");
+        switch (srcind) {
+        case EFFECT_FLAG_AUDIO_SOURCE_IND: s.append("required"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    if (flags & EFFECT_FLAG_OFFLOAD_MASK) {
+        s.append("offloadable, ");
+    }
+
+    int len = s.length();
+    if (len > 2) {
+        (void) s.lockBuffer(len);
+        s.unlockBuffer(len - 2);
+    }
+    return s;
+}
+
+
+void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -838,9 +944,10 @@
                     mDescriptor.type.node[2],
                 mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]);
     result.append(buffer);
-    snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n",
+    snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n",
             mDescriptor.apiVersion,
-            mDescriptor.flags);
+            mDescriptor.flags,
+            effectFlagsToString(mDescriptor.flags).string());
     result.append(buffer);
     snprintf(buffer, SIZE, "\t\t- name: %s\n",
             mDescriptor.name);
@@ -851,37 +958,37 @@
 
     result.append("\t\t- Input configuration:\n");
     result.append("\t\t\tFrames  Smp rate Channels Format Buffer\n");
-    snprintf(buffer, SIZE, "\t\t\t%05zu   %05d    %08x %6d %p\n",
+    snprintf(buffer, SIZE, "\t\t\t%05zu   %05d    %08x %6d (%s) %p\n",
             mConfig.inputCfg.buffer.frameCount,
             mConfig.inputCfg.samplingRate,
             mConfig.inputCfg.channels,
             mConfig.inputCfg.format,
+            formatToString((audio_format_t)mConfig.inputCfg.format),
             mConfig.inputCfg.buffer.raw);
     result.append(buffer);
 
     result.append("\t\t- Output configuration:\n");
     result.append("\t\t\tBuffer     Frames  Smp rate Channels Format\n");
-    snprintf(buffer, SIZE, "\t\t\t%p %05zu   %05d    %08x %d\n",
+    snprintf(buffer, SIZE, "\t\t\t%p %05zu   %05d    %08x %d (%s)\n",
             mConfig.outputCfg.buffer.raw,
             mConfig.outputCfg.buffer.frameCount,
             mConfig.outputCfg.samplingRate,
             mConfig.outputCfg.channels,
-            mConfig.outputCfg.format);
+            mConfig.outputCfg.format,
+            formatToString((audio_format_t)mConfig.outputCfg.format));
     result.append(buffer);
 
     snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
     result.append(buffer);
-    result.append("\t\t\tPid   Priority Ctrl Locked client server\n");
+    result.append("\t\t\t  Pid Priority Ctrl Locked client server\n");
     for (size_t i = 0; i < mHandles.size(); ++i) {
         EffectHandle *handle = mHandles[i];
         if (handle != NULL && !handle->destroyed_l()) {
-            handle->dump(buffer, SIZE);
+            handle->dumpToBuffer(buffer, SIZE);
             result.append(buffer);
         }
     }
 
-    result.append("\n");
-
     write(fd, result.string(), result.length());
 
     if (locked) {
@@ -911,18 +1018,15 @@
     }
     int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
     mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
-    if (mCblkMemory != 0) {
-        mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer());
-
-        if (mCblk != NULL) {
-            new(mCblk) effect_param_cblk_t();
-            mBuffer = (uint8_t *)mCblk + bufOffset;
-        }
-    } else {
+    if (mCblkMemory == 0 ||
+            (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) {
         ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE +
                 sizeof(effect_param_cblk_t));
+        mCblkMemory.clear();
         return;
     }
+    new(mCblk) effect_param_cblk_t();
+    mBuffer = (uint8_t *)mCblk + bufOffset;
 }
 
 AudioFlinger::EffectHandle::~EffectHandle()
@@ -939,6 +1043,11 @@
     disconnect(false);
 }
 
+status_t AudioFlinger::EffectHandle::initCheck()
+{
+    return mClient == 0 || mCblkMemory != 0 ? OK : NO_MEMORY;
+}
+
 status_t AudioFlinger::EffectHandle::enable()
 {
     ALOGV("enable %p", this);
@@ -1179,15 +1288,15 @@
 }
 
 
-void AudioFlinger::EffectHandle::dump(char* buffer, size_t size)
+void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size)
 {
     bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
 
-    snprintf(buffer, size, "\t\t\t%05d %05d    %01u    %01u      %05u  %05u\n",
+    snprintf(buffer, size, "\t\t\t%5d    %5d  %3s    %3s  %5u  %5u\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mPriority,
-            mHasControl,
-            !locked,
+            mHasControl ? "yes" : "no",
+            locked ? "yes" : "no",
             mCblk ? mCblk->clientIndex : 0,
             mCblk ? mCblk->serverIndex : 0
             );
@@ -1568,33 +1677,35 @@
     char buffer[SIZE];
     String8 result;
 
-    snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId);
+    size_t numEffects = mEffects.size();
+    snprintf(buffer, SIZE, "    %zu effects for session %d\n", numEffects, mSessionId);
     result.append(buffer);
 
-    bool locked = AudioFlinger::dumpTryLock(mLock);
-    // failed to lock - AudioFlinger is probably deadlocked
-    if (!locked) {
-        result.append("\tCould not lock mutex:\n");
-    }
-
-    result.append("\tNum fx In buffer   Out buffer   Active tracks:\n");
-    snprintf(buffer, SIZE, "\t%02zu     %p  %p   %d\n",
-            mEffects.size(),
-            mInBuffer,
-            mOutBuffer,
-            mActiveTrackCnt);
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-
-    for (size_t i = 0; i < mEffects.size(); ++i) {
-        sp<EffectModule> effect = mEffects[i];
-        if (effect != 0) {
-            effect->dump(fd, args);
+    if (numEffects) {
+        bool locked = AudioFlinger::dumpTryLock(mLock);
+        // failed to lock - AudioFlinger is probably deadlocked
+        if (!locked) {
+            result.append("\tCould not lock mutex:\n");
         }
-    }
 
-    if (locked) {
-        mLock.unlock();
+        result.append("\tIn buffer   Out buffer   Active tracks:\n");
+        snprintf(buffer, SIZE, "\t%p  %p   %d\n",
+                mInBuffer,
+                mOutBuffer,
+                mActiveTrackCnt);
+        result.append(buffer);
+        write(fd, result.string(), result.size());
+
+        for (size_t i = 0; i < numEffects; ++i) {
+            sp<EffectModule> effect = mEffects[i];
+            if (effect != 0) {
+                effect->dump(fd, args);
+            }
+        }
+
+        if (locked) {
+            mLock.unlock();
+        }
     }
 }
 
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index b717857..ccc4825 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -169,6 +169,7 @@
             const sp<IEffectClient>& effectClient,
             int32_t priority);
     virtual ~EffectHandle();
+    virtual status_t initCheck();
 
     // IEffect
     virtual status_t enable();
@@ -208,7 +209,7 @@
     // destroyed_l() must be called with the associated EffectModule mLock held
     bool destroyed_l() const { return mDestroyed; }
 
-    void dump(char* buffer, size_t size);
+    void dumpToBuffer(char* buffer, size_t size);
 
 protected:
     friend class AudioFlinger;          // for mEffect, mHasControl, mEnabled
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 85d637e..5cb42cc 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -40,617 +40,385 @@
 #include "AudioMixer.h"
 #include "FastMixer.h"
 
-#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
-#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
-#define MIN_WARMUP_CYCLES          2    // minimum number of loop cycles to wait for warmup
-#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
-
 #define FCC_2                       2   // fixed channel count assumption
 
 namespace android {
 
-// Fast mixer thread
-bool FastMixer::threadLoop()
+/*static*/ const FastMixerState FastMixer::initial;
+
+FastMixer::FastMixer() : FastThread(),
+    slopNs(0),
+    // fastTrackNames
+    // generations
+    outputSink(NULL),
+    outputSinkGen(0),
+    mixer(NULL),
+    mixBuffer(NULL),
+    mixBufferState(UNDEFINED),
+    format(Format_Invalid),
+    sampleRate(0),
+    fastTracksGen(0),
+    totalNativeFramesWritten(0),
+    // timestamp
+    nativeFramesWrittenButNotPresented(0)   // the = 0 is to silence the compiler
 {
-    static const FastMixerState initial;
-    const FastMixerState *previous = &initial, *current = &initial;
-    FastMixerState preIdle; // copy of state before we went into idle
-    struct timespec oldTs = {0, 0};
-    bool oldTsValid = false;
-    long slopNs = 0;    // accumulated time we've woken up too early (> 0) or too late (< 0)
-    long sleepNs = -1;  // -1: busy wait, 0: sched_yield, > 0: nanosleep
-    int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks
-    int generations[FastMixerState::kMaxFastTracks];    // last observed mFastTracks[i].mGeneration
+    // FIXME pass initial as parameter to base class constructor, and make it static local
+    previous = &initial;
+    current = &initial;
+
+    mDummyDumpState = &dummyDumpState;
+
     unsigned i;
     for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
         fastTrackNames[i] = -1;
         generations[i] = 0;
     }
-    NBAIO_Sink *outputSink = NULL;
-    int outputSinkGen = 0;
-    AudioMixer* mixer = NULL;
-    short *mixBuffer = NULL;
-    enum {UNDEFINED, MIXED, ZEROED} mixBufferState = UNDEFINED;
-    NBAIO_Format format = Format_Invalid;
-    unsigned sampleRate = 0;
-    int fastTracksGen = 0;
-    long periodNs = 0;      // expected period; the time required to render one mix buffer
-    long underrunNs = 0;    // underrun likely when write cycle is greater than this value
-    long overrunNs = 0;     // overrun likely when write cycle is less than this value
-    long forceNs = 0;       // if overrun detected, force the write cycle to take this much time
-    long warmupNs = 0;      // warmup complete when write cycle is greater than to this value
-    FastMixerDumpState dummyDumpState, *dumpState = &dummyDumpState;
-    bool ignoreNextOverrun = true;  // used to ignore initial overrun and first after an underrun
 #ifdef FAST_MIXER_STATISTICS
-    struct timespec oldLoad = {0, 0};    // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
-    bool oldLoadValid = false;  // whether oldLoad is valid
-    uint32_t bounds = 0;
-    bool full = false;      // whether we have collected at least mSamplingN samples
-#ifdef CPU_FREQUENCY_STATISTICS
-    ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
+    oldLoad.tv_sec = 0;
+    oldLoad.tv_nsec = 0;
 #endif
+}
+
+FastMixer::~FastMixer()
+{
+}
+
+FastMixerStateQueue* FastMixer::sq()
+{
+    return &mSQ;
+}
+
+const FastThreadState *FastMixer::poll()
+{
+    return mSQ.poll();
+}
+
+void FastMixer::setLog(NBLog::Writer *logWriter)
+{
+    if (mixer != NULL) {
+        mixer->setLog(logWriter);
+    }
+}
+
+void FastMixer::onIdle()
+{
+    preIdle = *(const FastMixerState *)current;
+    current = &preIdle;
+}
+
+void FastMixer::onExit()
+{
+    delete mixer;
+    delete[] mixBuffer;
+}
+
+bool FastMixer::isSubClassCommand(FastThreadState::Command command)
+{
+    switch ((FastMixerState::Command) command) {
+    case FastMixerState::MIX:
+    case FastMixerState::WRITE:
+    case FastMixerState::MIX_WRITE:
+        return true;
+    default:
+        return false;
+    }
+}
+
+void FastMixer::onStateChange()
+{
+    const FastMixerState * const current = (const FastMixerState *) this->current;
+    const FastMixerState * const previous = (const FastMixerState *) this->previous;
+    FastMixerDumpState * const dumpState = (FastMixerDumpState *) this->dumpState;
+    const size_t frameCount = current->mFrameCount;
+
+    // handle state change here, but since we want to diff the state,
+    // we're prepared for previous == &initial the first time through
+    unsigned previousTrackMask;
+
+    // check for change in output HAL configuration
+    NBAIO_Format previousFormat = format;
+    if (current->mOutputSinkGen != outputSinkGen) {
+        outputSink = current->mOutputSink;
+        outputSinkGen = current->mOutputSinkGen;
+        if (outputSink == NULL) {
+            format = Format_Invalid;
+            sampleRate = 0;
+        } else {
+            format = outputSink->format();
+            sampleRate = Format_sampleRate(format);
+            ALOG_ASSERT(Format_channelCount(format) == FCC_2);
+        }
+        dumpState->mSampleRate = sampleRate;
+    }
+
+    if ((!Format_isEqual(format, previousFormat)) || (frameCount != previous->mFrameCount)) {
+        // FIXME to avoid priority inversion, don't delete here
+        delete mixer;
+        mixer = NULL;
+        delete[] mixBuffer;
+        mixBuffer = NULL;
+        if (frameCount > 0 && sampleRate > 0) {
+            // FIXME new may block for unbounded time at internal mutex of the heap
+            //       implementation; it would be better to have normal mixer allocate for us
+            //       to avoid blocking here and to prevent possible priority inversion
+            mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
+            mixBuffer = new short[frameCount * FCC_2];
+            periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
+            underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
+            overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
+            forceNs = (frameCount * 950000000LL) / sampleRate;      // 0.95
+            warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50
+        } else {
+            periodNs = 0;
+            underrunNs = 0;
+            overrunNs = 0;
+            forceNs = 0;
+            warmupNs = 0;
+        }
+        mixBufferState = UNDEFINED;
+#if !LOG_NDEBUG
+        for (unsigned i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
+            fastTrackNames[i] = -1;
+        }
 #endif
-    unsigned coldGen = 0;   // last observed mColdGen
-    bool isWarm = false;    // true means ready to mix, false means wait for warmup before mixing
-    struct timespec measuredWarmupTs = {0, 0};  // how long did it take for warmup to complete
-    uint32_t warmupCycles = 0;  // counter of number of loop cycles required to warmup
-    NBAIO_Sink* teeSink = NULL; // if non-NULL, then duplicate write() to this non-blocking sink
-    NBLog::Writer dummyLogWriter, *logWriter = &dummyLogWriter;
-    uint32_t totalNativeFramesWritten = 0;  // copied to dumpState->mFramesWritten
+        // we need to reconfigure all active tracks
+        previousTrackMask = 0;
+        fastTracksGen = current->mFastTracksGen - 1;
+        dumpState->mFrameCount = frameCount;
+    } else {
+        previousTrackMask = previous->mTrackMask;
+    }
 
-    // next 2 fields are valid only when timestampStatus == NO_ERROR
-    AudioTimestamp timestamp;
-    uint32_t nativeFramesWrittenButNotPresented = 0;    // the = 0 is to silence the compiler
-    status_t timestampStatus = INVALID_OPERATION;
+    // check for change in active track set
+    const unsigned currentTrackMask = current->mTrackMask;
+    dumpState->mTrackMask = currentTrackMask;
+    if (current->mFastTracksGen != fastTracksGen) {
+        ALOG_ASSERT(mixBuffer != NULL);
+        int name;
 
-    for (;;) {
-
-        // either nanosleep, sched_yield, or busy wait
-        if (sleepNs >= 0) {
-            if (sleepNs > 0) {
-                ALOG_ASSERT(sleepNs < 1000000000);
-                const struct timespec req = {0, sleepNs};
-                nanosleep(&req, NULL);
-            } else {
-                sched_yield();
-            }
-        }
-        // default to long sleep for next cycle
-        sleepNs = FAST_DEFAULT_NS;
-
-        // poll for state change
-        const FastMixerState *next = mSQ.poll();
-        if (next == NULL) {
-            // continue to use the default initial state until a real state is available
-            ALOG_ASSERT(current == &initial && previous == &initial);
-            next = current;
-        }
-
-        FastMixerState::Command command = next->mCommand;
-        if (next != current) {
-
-            // As soon as possible of learning of a new dump area, start using it
-            dumpState = next->mDumpState != NULL ? next->mDumpState : &dummyDumpState;
-            teeSink = next->mTeeSink;
-            logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter;
+        // process removed tracks first to avoid running out of track names
+        unsigned removedTracks = previousTrackMask & ~currentTrackMask;
+        while (removedTracks != 0) {
+            int i = __builtin_ctz(removedTracks);
+            removedTracks &= ~(1 << i);
+            const FastTrack* fastTrack = &current->mFastTracks[i];
+            ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
             if (mixer != NULL) {
-                mixer->setLog(logWriter);
-            }
-
-            // We want to always have a valid reference to the previous (non-idle) state.
-            // However, the state queue only guarantees access to current and previous states.
-            // So when there is a transition from a non-idle state into an idle state, we make a
-            // copy of the last known non-idle state so it is still available on return from idle.
-            // The possible transitions are:
-            //  non-idle -> non-idle    update previous from current in-place
-            //  non-idle -> idle        update previous from copy of current
-            //  idle     -> idle        don't update previous
-            //  idle     -> non-idle    don't update previous
-            if (!(current->mCommand & FastMixerState::IDLE)) {
-                if (command & FastMixerState::IDLE) {
-                    preIdle = *current;
-                    current = &preIdle;
-                    oldTsValid = false;
-#ifdef FAST_MIXER_STATISTICS
-                    oldLoadValid = false;
-#endif
-                    ignoreNextOverrun = true;
-                }
-                previous = current;
-            }
-            current = next;
-        }
-#if !LOG_NDEBUG
-        next = NULL;    // not referenced again
-#endif
-
-        dumpState->mCommand = command;
-
-        switch (command) {
-        case FastMixerState::INITIAL:
-        case FastMixerState::HOT_IDLE:
-            sleepNs = FAST_HOT_IDLE_NS;
-            continue;
-        case FastMixerState::COLD_IDLE:
-            // only perform a cold idle command once
-            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
-            if (current->mColdGen != coldGen) {
-                int32_t *coldFutexAddr = current->mColdFutexAddr;
-                ALOG_ASSERT(coldFutexAddr != NULL);
-                int32_t old = android_atomic_dec(coldFutexAddr);
-                if (old <= 0) {
-                    __futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
-                }
-                int policy = sched_getscheduler(0);
-                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
-                    ALOGE("did not receive expected priority boost");
-                }
-                // This may be overly conservative; there could be times that the normal mixer
-                // requests such a brief cold idle that it doesn't require resetting this flag.
-                isWarm = false;
-                measuredWarmupTs.tv_sec = 0;
-                measuredWarmupTs.tv_nsec = 0;
-                warmupCycles = 0;
-                sleepNs = -1;
-                coldGen = current->mColdGen;
-#ifdef FAST_MIXER_STATISTICS
-                bounds = 0;
-                full = false;
-#endif
-                oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
-                timestampStatus = INVALID_OPERATION;
-            } else {
-                sleepNs = FAST_HOT_IDLE_NS;
-            }
-            continue;
-        case FastMixerState::EXIT:
-            delete mixer;
-            delete[] mixBuffer;
-            return false;
-        case FastMixerState::MIX:
-        case FastMixerState::WRITE:
-        case FastMixerState::MIX_WRITE:
-            break;
-        default:
-            LOG_FATAL("bad command %d", command);
-        }
-
-        // there is a non-idle state available to us; did the state change?
-        size_t frameCount = current->mFrameCount;
-        if (current != previous) {
-
-            // handle state change here, but since we want to diff the state,
-            // we're prepared for previous == &initial the first time through
-            unsigned previousTrackMask;
-
-            // check for change in output HAL configuration
-            NBAIO_Format previousFormat = format;
-            if (current->mOutputSinkGen != outputSinkGen) {
-                outputSink = current->mOutputSink;
-                outputSinkGen = current->mOutputSinkGen;
-                if (outputSink == NULL) {
-                    format = Format_Invalid;
-                    sampleRate = 0;
-                } else {
-                    format = outputSink->format();
-                    sampleRate = Format_sampleRate(format);
-                    ALOG_ASSERT(Format_channelCount(format) == FCC_2);
-                }
-            }
-
-            if ((format != previousFormat) || (frameCount != previous->mFrameCount)) {
-                // FIXME to avoid priority inversion, don't delete here
-                delete mixer;
-                mixer = NULL;
-                delete[] mixBuffer;
-                mixBuffer = NULL;
-                if (frameCount > 0 && sampleRate > 0) {
-                    // FIXME new may block for unbounded time at internal mutex of the heap
-                    //       implementation; it would be better to have normal mixer allocate for us
-                    //       to avoid blocking here and to prevent possible priority inversion
-                    mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
-                    mixBuffer = new short[frameCount * FCC_2];
-                    periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
-                    underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
-                    overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
-                    forceNs = (frameCount * 950000000LL) / sampleRate;      // 0.95
-                    warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50
-                } else {
-                    periodNs = 0;
-                    underrunNs = 0;
-                    overrunNs = 0;
-                    forceNs = 0;
-                    warmupNs = 0;
-                }
-                mixBufferState = UNDEFINED;
-#if !LOG_NDEBUG
-                for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
-                    fastTrackNames[i] = -1;
-                }
-#endif
-                // we need to reconfigure all active tracks
-                previousTrackMask = 0;
-                fastTracksGen = current->mFastTracksGen - 1;
-                dumpState->mFrameCount = frameCount;
-            } else {
-                previousTrackMask = previous->mTrackMask;
-            }
-
-            // check for change in active track set
-            unsigned currentTrackMask = current->mTrackMask;
-            dumpState->mTrackMask = currentTrackMask;
-            if (current->mFastTracksGen != fastTracksGen) {
-                ALOG_ASSERT(mixBuffer != NULL);
-                int name;
-
-                // process removed tracks first to avoid running out of track names
-                unsigned removedTracks = previousTrackMask & ~currentTrackMask;
-                while (removedTracks != 0) {
-                    i = __builtin_ctz(removedTracks);
-                    removedTracks &= ~(1 << i);
-                    const FastTrack* fastTrack = &current->mFastTracks[i];
-                    ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
-                    if (mixer != NULL) {
-                        name = fastTrackNames[i];
-                        ALOG_ASSERT(name >= 0);
-                        mixer->deleteTrackName(name);
-                    }
-#if !LOG_NDEBUG
-                    fastTrackNames[i] = -1;
-#endif
-                    // don't reset track dump state, since other side is ignoring it
-                    generations[i] = fastTrack->mGeneration;
-                }
-
-                // now process added tracks
-                unsigned addedTracks = currentTrackMask & ~previousTrackMask;
-                while (addedTracks != 0) {
-                    i = __builtin_ctz(addedTracks);
-                    addedTracks &= ~(1 << i);
-                    const FastTrack* fastTrack = &current->mFastTracks[i];
-                    AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-                    ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1);
-                    if (mixer != NULL) {
-                        // calling getTrackName with default channel mask and a random invalid
-                        //   sessionId (no effects here)
-                        name = mixer->getTrackName(AUDIO_CHANNEL_OUT_STEREO, -555);
-                        ALOG_ASSERT(name >= 0);
-                        fastTrackNames[i] = name;
-                        mixer->setBufferProvider(name, bufferProvider);
-                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
-                                (void *) mixBuffer);
-                        // newly allocated track names default to full scale volume
-                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
-                                (void *)(uintptr_t)fastTrack->mChannelMask);
-                        mixer->enable(name);
-                    }
-                    generations[i] = fastTrack->mGeneration;
-                }
-
-                // finally process (potentially) modified tracks; these use the same slot
-                // but may have a different buffer provider or volume provider
-                unsigned modifiedTracks = currentTrackMask & previousTrackMask;
-                while (modifiedTracks != 0) {
-                    i = __builtin_ctz(modifiedTracks);
-                    modifiedTracks &= ~(1 << i);
-                    const FastTrack* fastTrack = &current->mFastTracks[i];
-                    if (fastTrack->mGeneration != generations[i]) {
-                        // this track was actually modified
-                        AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-                        ALOG_ASSERT(bufferProvider != NULL);
-                        if (mixer != NULL) {
-                            name = fastTrackNames[i];
-                            ALOG_ASSERT(name >= 0);
-                            mixer->setBufferProvider(name, bufferProvider);
-                            if (fastTrack->mVolumeProvider == NULL) {
-                                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
-                                        (void *)0x1000);
-                                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
-                                        (void *)0x1000);
-                            }
-                            mixer->setParameter(name, AudioMixer::RESAMPLE,
-                                    AudioMixer::REMOVE, NULL);
-                            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
-                                    (void *)(uintptr_t) fastTrack->mChannelMask);
-                            // already enabled
-                        }
-                        generations[i] = fastTrack->mGeneration;
-                    }
-                }
-
-                fastTracksGen = current->mFastTracksGen;
-
-                dumpState->mNumTracks = popcount(currentTrackMask);
-            }
-
-#if 1   // FIXME shouldn't need this
-            // only process state change once
-            previous = current;
-#endif
-        }
-
-        // do work using current state here
-        if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
-            ALOG_ASSERT(mixBuffer != NULL);
-            // for each track, update volume and check for underrun
-            unsigned currentTrackMask = current->mTrackMask;
-            while (currentTrackMask != 0) {
-                i = __builtin_ctz(currentTrackMask);
-                currentTrackMask &= ~(1 << i);
-                const FastTrack* fastTrack = &current->mFastTracks[i];
-
-                // Refresh the per-track timestamp
-                if (timestampStatus == NO_ERROR) {
-                    uint32_t trackFramesWrittenButNotPresented =
-                        nativeFramesWrittenButNotPresented;
-                    uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
-                    // Can't provide an AudioTimestamp before first frame presented,
-                    // or during the brief 32-bit wraparound window
-                    if (trackFramesWritten >= trackFramesWrittenButNotPresented) {
-                        AudioTimestamp perTrackTimestamp;
-                        perTrackTimestamp.mPosition =
-                                trackFramesWritten - trackFramesWrittenButNotPresented;
-                        perTrackTimestamp.mTime = timestamp.mTime;
-                        fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
-                    }
-                }
-
-                int name = fastTrackNames[i];
+                name = fastTrackNames[i];
                 ALOG_ASSERT(name >= 0);
-                if (fastTrack->mVolumeProvider != NULL) {
-                    uint32_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
-                    mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
-                            (void *)(uintptr_t)(vlr & 0xFFFF));
-                    mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
-                            (void *)(uintptr_t)(vlr >> 16));
-                }
-                // FIXME The current implementation of framesReady() for fast tracks
-                // takes a tryLock, which can block
-                // up to 1 ms.  If enough active tracks all blocked in sequence, this would result
-                // in the overall fast mix cycle being delayed.  Should use a non-blocking FIFO.
-                size_t framesReady = fastTrack->mBufferProvider->framesReady();
-                if (ATRACE_ENABLED()) {
-                    // I wish we had formatted trace names
-                    char traceName[16];
-                    strcpy(traceName, "fRdy");
-                    traceName[4] = i + (i < 10 ? '0' : 'A' - 10);
-                    traceName[5] = '\0';
-                    ATRACE_INT(traceName, framesReady);
-                }
-                FastTrackDump *ftDump = &dumpState->mTracks[i];
-                FastTrackUnderruns underruns = ftDump->mUnderruns;
-                if (framesReady < frameCount) {
-                    if (framesReady == 0) {
-                        underruns.mBitFields.mEmpty++;
-                        underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY;
-                        mixer->disable(name);
-                    } else {
-                        // allow mixing partial buffer
-                        underruns.mBitFields.mPartial++;
-                        underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
-                        mixer->enable(name);
+                mixer->deleteTrackName(name);
+            }
+#if !LOG_NDEBUG
+            fastTrackNames[i] = -1;
+#endif
+            // don't reset track dump state, since other side is ignoring it
+            generations[i] = fastTrack->mGeneration;
+        }
+
+        // now process added tracks
+        unsigned addedTracks = currentTrackMask & ~previousTrackMask;
+        while (addedTracks != 0) {
+            int i = __builtin_ctz(addedTracks);
+            addedTracks &= ~(1 << i);
+            const FastTrack* fastTrack = &current->mFastTracks[i];
+            AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
+            ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1);
+            if (mixer != NULL) {
+                // calling getTrackName with default channel mask and a random invalid
+                //   sessionId (no effects here)
+                name = mixer->getTrackName(AUDIO_CHANNEL_OUT_STEREO, -555);
+                ALOG_ASSERT(name >= 0);
+                fastTrackNames[i] = name;
+                mixer->setBufferProvider(name, bufferProvider);
+                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                        (void *) mixBuffer);
+                // newly allocated track names default to full scale volume
+                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                        (void *)(uintptr_t)fastTrack->mChannelMask);
+                mixer->enable(name);
+            }
+            generations[i] = fastTrack->mGeneration;
+        }
+
+        // finally process (potentially) modified tracks; these use the same slot
+        // but may have a different buffer provider or volume provider
+        unsigned modifiedTracks = currentTrackMask & previousTrackMask;
+        while (modifiedTracks != 0) {
+            int i = __builtin_ctz(modifiedTracks);
+            modifiedTracks &= ~(1 << i);
+            const FastTrack* fastTrack = &current->mFastTracks[i];
+            if (fastTrack->mGeneration != generations[i]) {
+                // this track was actually modified
+                AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
+                ALOG_ASSERT(bufferProvider != NULL);
+                if (mixer != NULL) {
+                    name = fastTrackNames[i];
+                    ALOG_ASSERT(name >= 0);
+                    mixer->setBufferProvider(name, bufferProvider);
+                    if (fastTrack->mVolumeProvider == NULL) {
+                        mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
+                                (void *)0x1000);
+                        mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
+                                (void *)0x1000);
                     }
+                    mixer->setParameter(name, AudioMixer::RESAMPLE,
+                            AudioMixer::REMOVE, NULL);
+                    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                            (void *)(uintptr_t) fastTrack->mChannelMask);
+                    // already enabled
+                }
+                generations[i] = fastTrack->mGeneration;
+            }
+        }
+
+        fastTracksGen = current->mFastTracksGen;
+
+        dumpState->mNumTracks = popcount(currentTrackMask);
+    }
+}
+
+void FastMixer::onWork()
+{
+    const FastMixerState * const current = (const FastMixerState *) this->current;
+    FastMixerDumpState * const dumpState = (FastMixerDumpState *) this->dumpState;
+    const FastMixerState::Command command = this->command;
+    const size_t frameCount = current->mFrameCount;
+
+    if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
+        ALOG_ASSERT(mixBuffer != NULL);
+        // for each track, update volume and check for underrun
+        unsigned currentTrackMask = current->mTrackMask;
+        while (currentTrackMask != 0) {
+            int i = __builtin_ctz(currentTrackMask);
+            currentTrackMask &= ~(1 << i);
+            const FastTrack* fastTrack = &current->mFastTracks[i];
+
+            // Refresh the per-track timestamp
+            if (timestampStatus == NO_ERROR) {
+                uint32_t trackFramesWrittenButNotPresented =
+                    nativeFramesWrittenButNotPresented;
+                uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
+                // Can't provide an AudioTimestamp before first frame presented,
+                // or during the brief 32-bit wraparound window
+                if (trackFramesWritten >= trackFramesWrittenButNotPresented) {
+                    AudioTimestamp perTrackTimestamp;
+                    perTrackTimestamp.mPosition =
+                            trackFramesWritten - trackFramesWrittenButNotPresented;
+                    perTrackTimestamp.mTime = timestamp.mTime;
+                    fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
+                }
+            }
+
+            int name = fastTrackNames[i];
+            ALOG_ASSERT(name >= 0);
+            if (fastTrack->mVolumeProvider != NULL) {
+                uint32_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
+                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
+                        (void *)(uintptr_t)(vlr & 0xFFFF));
+                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
+                        (void *)(uintptr_t)(vlr >> 16));
+            }
+            // FIXME The current implementation of framesReady() for fast tracks
+            // takes a tryLock, which can block
+            // up to 1 ms.  If enough active tracks all blocked in sequence, this would result
+            // in the overall fast mix cycle being delayed.  Should use a non-blocking FIFO.
+            size_t framesReady = fastTrack->mBufferProvider->framesReady();
+            if (ATRACE_ENABLED()) {
+                // I wish we had formatted trace names
+                char traceName[16];
+                strcpy(traceName, "fRdy");
+                traceName[4] = i + (i < 10 ? '0' : 'A' - 10);
+                traceName[5] = '\0';
+                ATRACE_INT(traceName, framesReady);
+            }
+            FastTrackDump *ftDump = &dumpState->mTracks[i];
+            FastTrackUnderruns underruns = ftDump->mUnderruns;
+            if (framesReady < frameCount) {
+                if (framesReady == 0) {
+                    underruns.mBitFields.mEmpty++;
+                    underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY;
+                    mixer->disable(name);
                 } else {
-                    underruns.mBitFields.mFull++;
-                    underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
+                    // allow mixing partial buffer
+                    underruns.mBitFields.mPartial++;
+                    underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
                     mixer->enable(name);
                 }
-                ftDump->mUnderruns = underruns;
-                ftDump->mFramesReady = framesReady;
-            }
-
-            int64_t pts;
-            if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts)))
-                pts = AudioBufferProvider::kInvalidPTS;
-
-            // process() is CPU-bound
-            mixer->process(pts);
-            mixBufferState = MIXED;
-        } else if (mixBufferState == MIXED) {
-            mixBufferState = UNDEFINED;
-        }
-        bool attemptedWrite = false;
-        //bool didFullWrite = false;    // dumpsys could display a count of partial writes
-        if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
-            if (mixBufferState == UNDEFINED) {
-                memset(mixBuffer, 0, frameCount * FCC_2 * sizeof(short));
-                mixBufferState = ZEROED;
-            }
-            if (teeSink != NULL) {
-                (void) teeSink->write(mixBuffer, frameCount);
-            }
-            // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
-            //       but this code should be modified to handle both non-blocking and blocking sinks
-            dumpState->mWriteSequence++;
-            ATRACE_BEGIN("write");
-            ssize_t framesWritten = outputSink->write(mixBuffer, frameCount);
-            ATRACE_END();
-            dumpState->mWriteSequence++;
-            if (framesWritten >= 0) {
-                ALOG_ASSERT((size_t) framesWritten <= frameCount);
-                totalNativeFramesWritten += framesWritten;
-                dumpState->mFramesWritten = totalNativeFramesWritten;
-                //if ((size_t) framesWritten == frameCount) {
-                //    didFullWrite = true;
-                //}
             } else {
-                dumpState->mWriteErrors++;
+                underruns.mBitFields.mFull++;
+                underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
+                mixer->enable(name);
             }
-            attemptedWrite = true;
-            // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
-
-            timestampStatus = outputSink->getTimestamp(timestamp);
-            if (timestampStatus == NO_ERROR) {
-                uint32_t totalNativeFramesPresented = timestamp.mPosition;
-                if (totalNativeFramesPresented <= totalNativeFramesWritten) {
-                    nativeFramesWrittenButNotPresented =
-                        totalNativeFramesWritten - totalNativeFramesPresented;
-                } else {
-                    // HAL reported that more frames were presented than were written
-                    timestampStatus = INVALID_OPERATION;
-                }
-            }
+            ftDump->mUnderruns = underruns;
+            ftDump->mFramesReady = framesReady;
         }
 
-        // To be exactly periodic, compute the next sleep time based on current time.
-        // This code doesn't have long-term stability when the sink is non-blocking.
-        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
-        struct timespec newTs;
-        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
-        if (rc == 0) {
-            //logWriter->logTimestamp(newTs);
-            if (oldTsValid) {
-                time_t sec = newTs.tv_sec - oldTs.tv_sec;
-                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
-                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
-                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
-                        oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
-                if (nsec < 0) {
-                    --sec;
-                    nsec += 1000000000;
-                }
-                // To avoid an initial underrun on fast tracks after exiting standby,
-                // do not start pulling data from tracks and mixing until warmup is complete.
-                // Warmup is considered complete after the earlier of:
-                //      MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs
-                //      MAX_WARMUP_CYCLES write() attempts.
-                // This is overly conservative, but to get better accuracy requires a new HAL API.
-                if (!isWarm && attemptedWrite) {
-                    measuredWarmupTs.tv_sec += sec;
-                    measuredWarmupTs.tv_nsec += nsec;
-                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
-                        measuredWarmupTs.tv_sec++;
-                        measuredWarmupTs.tv_nsec -= 1000000000;
-                    }
-                    ++warmupCycles;
-                    if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) ||
-                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
-                        isWarm = true;
-                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
-                        dumpState->mWarmupCycles = warmupCycles;
-                    }
-                }
-                sleepNs = -1;
-                if (isWarm) {
-                    if (sec > 0 || nsec > underrunNs) {
-                        ATRACE_NAME("underrun");
-                        // FIXME only log occasionally
-                        ALOGV("underrun: time since last cycle %d.%03ld sec",
-                                (int) sec, nsec / 1000000L);
-                        dumpState->mUnderruns++;
-                        ignoreNextOverrun = true;
-                    } else if (nsec < overrunNs) {
-                        if (ignoreNextOverrun) {
-                            ignoreNextOverrun = false;
-                        } else {
-                            // FIXME only log occasionally
-                            ALOGV("overrun: time since last cycle %d.%03ld sec",
-                                    (int) sec, nsec / 1000000L);
-                            dumpState->mOverruns++;
-                        }
-                        // This forces a minimum cycle time. It:
-                        //  - compensates for an audio HAL with jitter due to sample rate conversion
-                        //  - works with a variable buffer depth audio HAL that never pulls at a
-                        //    rate < than overrunNs per buffer.
-                        //  - recovers from overrun immediately after underrun
-                        // It doesn't work with a non-blocking audio HAL.
-                        sleepNs = forceNs - nsec;
-                    } else {
-                        ignoreNextOverrun = false;
-                    }
-                }
-#ifdef FAST_MIXER_STATISTICS
-                if (isWarm) {
-                    // advance the FIFO queue bounds
-                    size_t i = bounds & (dumpState->mSamplingN - 1);
-                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
-                    if (full) {
-                        bounds += 0x10000;
-                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
-                        full = true;
-                    }
-                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
-                    uint32_t monotonicNs = nsec;
-                    if (sec > 0 && sec < 4) {
-                        monotonicNs += sec * 1000000000;
-                    }
-                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
-                    uint32_t loadNs = 0;
-                    struct timespec newLoad;
-                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
-                    if (rc == 0) {
-                        if (oldLoadValid) {
-                            sec = newLoad.tv_sec - oldLoad.tv_sec;
-                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
-                            if (nsec < 0) {
-                                --sec;
-                                nsec += 1000000000;
-                            }
-                            loadNs = nsec;
-                            if (sec > 0 && sec < 4) {
-                                loadNs += sec * 1000000000;
-                            }
-                        } else {
-                            // first time through the loop
-                            oldLoadValid = true;
-                        }
-                        oldLoad = newLoad;
-                    }
-#ifdef CPU_FREQUENCY_STATISTICS
-                    // get the absolute value of CPU clock frequency in kHz
-                    int cpuNum = sched_getcpu();
-                    uint32_t kHz = tcu.getCpukHz(cpuNum);
-                    kHz = (kHz << 4) | (cpuNum & 0xF);
-#endif
-                    // save values in FIFO queues for dumpsys
-                    // these stores #1, #2, #3 are not atomic with respect to each other,
-                    // or with respect to store #4 below
-                    dumpState->mMonotonicNs[i] = monotonicNs;
-                    dumpState->mLoadNs[i] = loadNs;
-#ifdef CPU_FREQUENCY_STATISTICS
-                    dumpState->mCpukHz[i] = kHz;
-#endif
-                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
-                    // the newest open & oldest closed halves are atomic with respect to each other
-                    dumpState->mBounds = bounds;
-                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
-                    ATRACE_INT("load_us", loadNs / 1000);
-                }
-#endif
-            } else {
-                // first time through the loop
-                oldTsValid = true;
-                sleepNs = periodNs;
-                ignoreNextOverrun = true;
-            }
-            oldTs = newTs;
+        int64_t pts;
+        if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) {
+            pts = AudioBufferProvider::kInvalidPTS;
+        }
+
+        // process() is CPU-bound
+        mixer->process(pts);
+        mixBufferState = MIXED;
+    } else if (mixBufferState == MIXED) {
+        mixBufferState = UNDEFINED;
+    }
+    //bool didFullWrite = false;    // dumpsys could display a count of partial writes
+    if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
+        if (mixBufferState == UNDEFINED) {
+            memset(mixBuffer, 0, frameCount * FCC_2 * sizeof(short));
+            mixBufferState = ZEROED;
+        }
+        // if non-NULL, then duplicate write() to this non-blocking sink
+        NBAIO_Sink* teeSink;
+        if ((teeSink = current->mTeeSink) != NULL) {
+            (void) teeSink->write(mixBuffer, frameCount);
+        }
+        // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
+        //       but this code should be modified to handle both non-blocking and blocking sinks
+        dumpState->mWriteSequence++;
+        ATRACE_BEGIN("write");
+        ssize_t framesWritten = outputSink->write(mixBuffer, frameCount);
+        ATRACE_END();
+        dumpState->mWriteSequence++;
+        if (framesWritten >= 0) {
+            ALOG_ASSERT((size_t) framesWritten <= frameCount);
+            totalNativeFramesWritten += framesWritten;
+            dumpState->mFramesWritten = totalNativeFramesWritten;
+            //if ((size_t) framesWritten == frameCount) {
+            //    didFullWrite = true;
+            //}
         } else {
-            // monotonic clock is broken
-            oldTsValid = false;
-            sleepNs = periodNs;
+            dumpState->mWriteErrors++;
         }
+        attemptedWrite = true;
+        // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
 
-
-    }   // for (;;)
-
-    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
+        timestampStatus = outputSink->getTimestamp(timestamp);
+        if (timestampStatus == NO_ERROR) {
+            uint32_t totalNativeFramesPresented = timestamp.mPosition;
+            if (totalNativeFramesPresented <= totalNativeFramesWritten) {
+                nativeFramesWrittenButNotPresented =
+                    totalNativeFramesWritten - totalNativeFramesPresented;
+            } else {
+                // HAL reported that more frames were presented than were written
+                timestampStatus = INVALID_OPERATION;
+            }
+        }
+    }
 }
 
 FastMixerDumpState::FastMixerDumpState(
 #ifdef FAST_MIXER_STATISTICS
         uint32_t samplingN
 #endif
-        ) :
-    mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
-    mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
-    mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0),
+        ) : FastThreadDumpState(),
+    mWriteSequence(0), mFramesWritten(0),
+    mNumTracks(0), mWriteErrors(0),
+    mSampleRate(0), mFrameCount(0),
     mTrackMask(0)
-#ifdef FAST_MIXER_STATISTICS
-    , mSamplingN(0), mBounds(0)
-#endif
 {
-    mMeasuredWarmupTs.tv_sec = 0;
-    mMeasuredWarmupTs.tv_nsec = 0;
 #ifdef FAST_MIXER_STATISTICS
     increaseSamplingN(samplingN);
 #endif
@@ -695,7 +463,7 @@
 void FastMixerDumpState::dump(int fd) const
 {
     if (mCommand == FastMixerState::INITIAL) {
-        fdprintf(fd, "FastMixer not initialized\n");
+        fdprintf(fd, "  FastMixer not initialized\n");
         return;
     }
 #define COMMAND_MAX 32
@@ -729,10 +497,10 @@
     double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
             (mMeasuredWarmupTs.tv_nsec / 1000000.0);
     double mixPeriodSec = (double) mFrameCount / (double) mSampleRate;
-    fdprintf(fd, "FastMixer command=%s writeSequence=%u framesWritten=%u\n"
-                 "          numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
-                 "          sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
-                 "          mixPeriod=%.2f ms\n",
+    fdprintf(fd, "  FastMixer command=%s writeSequence=%u framesWritten=%u\n"
+                 "            numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
+                 "            sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
+                 "            mixPeriod=%.2f ms\n",
                  string, mWriteSequence, mFramesWritten,
                  mNumTracks, mWriteErrors, mUnderruns, mOverruns,
                  mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
@@ -783,14 +551,20 @@
         previousCpukHz = sampleCpukHz;
 #endif
     }
-    fdprintf(fd, "Simple moving statistics over last %.1f seconds:\n", wall.n() * mixPeriodSec);
-    fdprintf(fd, "  wall clock time in ms per mix cycle:\n"
-                 "    mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                 wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, wall.stddev()*1e-6);
-    fdprintf(fd, "  raw CPU load in us per mix cycle:\n"
-                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                 loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
-                 loadNs.stddev()*1e-3);
+    if (n) {
+        fdprintf(fd, "  Simple moving statistics over last %.1f seconds:\n",
+                     wall.n() * mixPeriodSec);
+        fdprintf(fd, "    wall clock time in ms per mix cycle:\n"
+                     "      mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
+                     wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
+                     wall.stddev()*1e-6);
+        fdprintf(fd, "    raw CPU load in us per mix cycle:\n"
+                     "      mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
+                     loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
+                     loadNs.stddev()*1e-3);
+    } else {
+        fdprintf(fd, "  No FastMixer statistics available currently\n");
+    }
 #ifdef CPU_FREQUENCY_STATISTICS
     fdprintf(fd, "  CPU clock frequency in MHz:\n"
                  "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
@@ -808,9 +582,9 @@
             left.sample(tail[i]);
             right.sample(tail[n - (i + 1)]);
         }
-        fdprintf(fd, "Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n"
-                     "  left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
-                     "  right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
+        fdprintf(fd, "  Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n"
+                     "    left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
+                     "    right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                      left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
                      right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
                      right.stddev()*1e-6);
@@ -823,9 +597,9 @@
     // Instead we always display all tracks, with an indication
     // of whether we think the track is active.
     uint32_t trackMask = mTrackMask;
-    fdprintf(fd, "Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
+    fdprintf(fd, "  Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
             FastMixerState::kMaxFastTracks, trackMask);
-    fdprintf(fd, "Index Active Full Partial Empty  Recent Ready\n");
+    fdprintf(fd, "  Index Active Full Partial Empty  Recent Ready\n");
     for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) {
         bool isActive = trackMask & 1;
         const FastTrackDump *ftDump = &mTracks[i];
@@ -845,7 +619,7 @@
             mostRecent = "?";
             break;
         }
-        fdprintf(fd, "%5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no",
+        fdprintf(fd, "  %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no",
                 (underruns.mBitFields.mFull) & UNDERRUN_MASK,
                 (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
                 (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 6158925..981c1a7 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -18,123 +18,65 @@
 #define ANDROID_AUDIO_FAST_MIXER_H
 
 #include <utils/Debug.h>
-#include <utils/Thread.h>
+#if 1   // FIXME move to where used
 extern "C" {
 #include "../private/bionic_futex.h"
 }
+#endif
+#include "FastThread.h"
 #include "StateQueue.h"
 #include "FastMixerState.h"
+#include "FastMixerDumpState.h"
 
 namespace android {
 
+class AudioMixer;
+
 typedef StateQueue<FastMixerState> FastMixerStateQueue;
 
-class FastMixer : public Thread {
+class FastMixer : public FastThread {
 
 public:
-            FastMixer() : Thread(false /*canCallJava*/) { }
-    virtual ~FastMixer() { }
+            FastMixer();
+    virtual ~FastMixer();
 
-            FastMixerStateQueue* sq() { return &mSQ; }
+            FastMixerStateQueue* sq();
 
 private:
-    virtual bool                threadLoop();
             FastMixerStateQueue mSQ;
 
+    // callouts
+    virtual const FastThreadState *poll();
+    virtual void setLog(NBLog::Writer *logWriter);
+    virtual void onIdle();
+    virtual void onExit();
+    virtual bool isSubClassCommand(FastThreadState::Command command);
+    virtual void onStateChange();
+    virtual void onWork();
+
+    // FIXME these former local variables need comments and to be renamed to have "m" prefix
+    static const FastMixerState initial;
+    FastMixerState preIdle; // copy of state before we went into idle
+    long slopNs;        // accumulated time we've woken up too early (> 0) or too late (< 0)
+    int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks
+    int generations[FastMixerState::kMaxFastTracks];    // last observed mFastTracks[i].mGeneration
+    NBAIO_Sink *outputSink;
+    int outputSinkGen;
+    AudioMixer* mixer;
+    short *mixBuffer;
+    enum {UNDEFINED, MIXED, ZEROED} mixBufferState;
+    NBAIO_Format format;
+    unsigned sampleRate;
+    int fastTracksGen;
+    FastMixerDumpState dummyDumpState;
+    uint32_t totalNativeFramesWritten;  // copied to dumpState->mFramesWritten
+
+    // next 2 fields are valid only when timestampStatus == NO_ERROR
+    AudioTimestamp timestamp;
+    uint32_t nativeFramesWrittenButNotPresented;
+
 };  // class FastMixer
 
-// Describes the underrun status for a single "pull" attempt
-enum FastTrackUnderrunStatus {
-    UNDERRUN_FULL,      // framesReady() is full frame count, no underrun
-    UNDERRUN_PARTIAL,   // framesReady() is non-zero but < full frame count, partial underrun
-    UNDERRUN_EMPTY,     // framesReady() is zero, total underrun
-};
-
-// Underrun counters are not reset to zero for new tracks or if track generation changes.
-// This packed representation is used to keep the information atomic.
-union FastTrackUnderruns {
-    FastTrackUnderruns() { mAtomic = 0;
-            COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(FastTrackUnderruns) == sizeof(uint32_t)); }
-    FastTrackUnderruns(const FastTrackUnderruns& copyFrom) : mAtomic(copyFrom.mAtomic) { }
-    FastTrackUnderruns& operator=(const FastTrackUnderruns& rhs)
-            { if (this != &rhs) mAtomic = rhs.mAtomic; return *this; }
-    struct {
-#define UNDERRUN_BITS 10
-#define UNDERRUN_MASK ((1 << UNDERRUN_BITS) - 1)
-        uint32_t mFull    : UNDERRUN_BITS; // framesReady() is full frame count
-        uint32_t mPartial : UNDERRUN_BITS; // framesReady() is non-zero but < full frame count
-        uint32_t mEmpty   : UNDERRUN_BITS; // framesReady() is zero
-        FastTrackUnderrunStatus mMostRecent : 2;    // status of most recent framesReady()
-    }        mBitFields;
-private:
-    uint32_t mAtomic;
-};
-
-// Represents the dump state of a fast track
-struct FastTrackDump {
-    FastTrackDump() : mFramesReady(0) { }
-    /*virtual*/ ~FastTrackDump() { }
-    FastTrackUnderruns mUnderruns;
-    size_t mFramesReady;        // most recent value only; no long-term statistics kept
-};
-
-// The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys.
-// Each individual native word-sized field is accessed atomically.  But the
-// overall structure is non-atomic, that is there may be an inconsistency between fields.
-// No barriers or locks are used for either writing or reading.
-// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
-// It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
-struct FastMixerDumpState {
-    FastMixerDumpState(
-#ifdef FAST_MIXER_STATISTICS
-            uint32_t samplingN = kSamplingNforLowRamDevice
-#endif
-            );
-    /*virtual*/ ~FastMixerDumpState();
-
-    void dump(int fd) const;    // should only be called on a stable copy, not the original
-
-    FastMixerState::Command mCommand;   // current command
-    uint32_t mWriteSequence;    // incremented before and after each write()
-    uint32_t mFramesWritten;    // total number of frames written successfully
-    uint32_t mNumTracks;        // total number of active fast tracks
-    uint32_t mWriteErrors;      // total number of write() errors
-    uint32_t mUnderruns;        // total number of underruns
-    uint32_t mOverruns;         // total number of overruns
-    uint32_t mSampleRate;
-    size_t   mFrameCount;
-    struct timespec mMeasuredWarmupTs;  // measured warmup time
-    uint32_t mWarmupCycles;     // number of loop cycles required to warmup
-    uint32_t mTrackMask;        // mask of active tracks
-    FastTrackDump   mTracks[FastMixerState::kMaxFastTracks];
-
-#ifdef FAST_MIXER_STATISTICS
-    // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency.
-    // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000.
-    // The sample arrays are virtually allocated based on this compile-time constant,
-    // but are only initialized and used based on the runtime parameter mSamplingN.
-    static const uint32_t kSamplingN = 0x8000;
-    // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN.
-    // This value was chosen such that each array uses 1 small page (4 Kbytes).
-    static const uint32_t kSamplingNforLowRamDevice = 0x400;
-    // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN.
-    uint32_t mSamplingN;
-    // The bounds define the interval of valid samples, and are represented as follows:
-    //      newest open (excluded) endpoint   = lower 16 bits of bounds, modulo N
-    //      oldest closed (included) endpoint = upper 16 bits of bounds, modulo N
-    // Number of valid samples is newest - oldest.
-    uint32_t mBounds;                   // bounds for mMonotonicNs, mThreadCpuNs, and mCpukHz
-    // The elements in the *Ns arrays are in units of nanoseconds <= 3999999999.
-    uint32_t mMonotonicNs[kSamplingN];  // delta monotonic (wall clock) time
-    uint32_t mLoadNs[kSamplingN];       // delta CPU load in time
-#ifdef CPU_FREQUENCY_STATISTICS
-    uint32_t mCpukHz[kSamplingN];       // absolute CPU clock frequency in kHz, bits 0-3 are CPU#
-#endif
-    // Increase sampling window after construction, must be a power of 2 <= kSamplingN
-    void    increaseSamplingN(uint32_t samplingN);
-#endif
-};
-
 }   // namespace android
 
 #endif  // ANDROID_AUDIO_FAST_MIXER_H
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
new file mode 100644
index 0000000..6a1e4649
--- /dev/null
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
+#define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
+
+#include "Configuration.h"
+
+namespace android {
+
+// Describes the underrun status for a single "pull" attempt
+enum FastTrackUnderrunStatus {
+    UNDERRUN_FULL,      // framesReady() is full frame count, no underrun
+    UNDERRUN_PARTIAL,   // framesReady() is non-zero but < full frame count, partial underrun
+    UNDERRUN_EMPTY,     // framesReady() is zero, total underrun
+};
+
+// Underrun counters are not reset to zero for new tracks or if track generation changes.
+// This packed representation is used to keep the information atomic.
+union FastTrackUnderruns {
+    FastTrackUnderruns() { mAtomic = 0;
+            COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(FastTrackUnderruns) == sizeof(uint32_t)); }
+    FastTrackUnderruns(const FastTrackUnderruns& copyFrom) : mAtomic(copyFrom.mAtomic) { }
+    FastTrackUnderruns& operator=(const FastTrackUnderruns& rhs)
+            { if (this != &rhs) mAtomic = rhs.mAtomic; return *this; }
+    struct {
+#define UNDERRUN_BITS 10
+#define UNDERRUN_MASK ((1 << UNDERRUN_BITS) - 1)
+        uint32_t mFull    : UNDERRUN_BITS; // framesReady() is full frame count
+        uint32_t mPartial : UNDERRUN_BITS; // framesReady() is non-zero but < full frame count
+        uint32_t mEmpty   : UNDERRUN_BITS; // framesReady() is zero
+        FastTrackUnderrunStatus mMostRecent : 2;    // status of most recent framesReady()
+    }        mBitFields;
+private:
+    uint32_t mAtomic;
+};
+
+// Represents the dump state of a fast track
+struct FastTrackDump {
+    FastTrackDump() : mFramesReady(0) { }
+    /*virtual*/ ~FastTrackDump() { }
+    FastTrackUnderruns mUnderruns;
+    size_t mFramesReady;        // most recent value only; no long-term statistics kept
+};
+
+// The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys.
+// Each individual native word-sized field is accessed atomically.  But the
+// overall structure is non-atomic, that is there may be an inconsistency between fields.
+// No barriers or locks are used for either writing or reading.
+// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
+// It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
+struct FastMixerDumpState : FastThreadDumpState {
+    FastMixerDumpState(
+#ifdef FAST_MIXER_STATISTICS
+            uint32_t samplingN = kSamplingNforLowRamDevice
+#endif
+            );
+    /*virtual*/ ~FastMixerDumpState();
+
+    void dump(int fd) const;    // should only be called on a stable copy, not the original
+
+    uint32_t mWriteSequence;    // incremented before and after each write()
+    uint32_t mFramesWritten;    // total number of frames written successfully
+    uint32_t mNumTracks;        // total number of active fast tracks
+    uint32_t mWriteErrors;      // total number of write() errors
+    uint32_t mSampleRate;
+    size_t   mFrameCount;
+    uint32_t mTrackMask;        // mask of active tracks
+    FastTrackDump   mTracks[FastMixerState::kMaxFastTracks];
+
+#ifdef FAST_MIXER_STATISTICS
+    // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN.
+    // This value was chosen such that each array uses 1 small page (4 Kbytes).
+    static const uint32_t kSamplingNforLowRamDevice = 0x400;
+    // Increase sampling window after construction, must be a power of 2 <= kSamplingN
+    void    increaseSamplingN(uint32_t samplingN);
+#endif
+};
+
+}   // namespace android
+
+#endif  // ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index 43ff233..8e6d0d4 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#include "Configuration.h"
 #include "FastMixerState.h"
 
 namespace android {
@@ -29,10 +28,10 @@
 {
 }
 
-FastMixerState::FastMixerState() :
+FastMixerState::FastMixerState() : FastThreadState(),
+    // mFastTracks
     mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0),
-    mFrameCount(0), mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0),
-    mDumpState(NULL), mTeeSink(NULL), mNBLogWriter(NULL)
+    mFrameCount(0), mTeeSink(NULL)
 {
 }
 
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 9739fe9..be1a376 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -21,6 +21,7 @@
 #include <media/ExtendedAudioBufferProvider.h>
 #include <media/nbaio/NBAIO.h>
 #include <media/nbaio/NBLog.h>
+#include "FastThreadState.h"
 
 namespace android {
 
@@ -48,7 +49,7 @@
 };
 
 // Represents a single state of the fast mixer
-struct FastMixerState {
+struct FastMixerState : FastThreadState {
                 FastMixerState();
     /*virtual*/ ~FastMixerState();
 
@@ -61,23 +62,16 @@
     NBAIO_Sink* mOutputSink;    // HAL output device, must already be negotiated
     int         mOutputSinkGen; // increment when mOutputSink is assigned
     size_t      mFrameCount;    // number of frames per fast mix buffer
-    enum Command {
-        INITIAL = 0,            // used only for the initial state
-        HOT_IDLE = 1,           // do nothing
-        COLD_IDLE = 2,          // wait for the futex
-        IDLE = 3,               // either HOT_IDLE or COLD_IDLE
-        EXIT = 4,               // exit from thread
+
+    // Extends FastThreadState::Command
+    static const Command
         // The following commands also process configuration changes, and can be "or"ed:
         MIX = 0x8,              // mix tracks
         WRITE = 0x10,           // write to output sink
-        MIX_WRITE = 0x18,       // mix tracks and write to output sink
-    } mCommand;
-    int32_t*    mColdFutexAddr; // for COLD_IDLE only, pointer to the associated futex
-    unsigned    mColdGen;       // increment when COLD_IDLE is requested so it's only performed once
+        MIX_WRITE = 0x18;       // mix tracks and write to output sink
+
     // This might be a one-time configuration rather than per-state
-    FastMixerDumpState* mDumpState; // if non-NULL, then update dump state periodically
     NBAIO_Sink* mTeeSink;       // if non-NULL, then duplicate write()s to this non-blocking sink
-    NBLog::Writer* mNBLogWriter; // non-blocking logger
 };  // struct FastMixerState
 
 }   // namespace android
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
new file mode 100644
index 0000000..8a216b3
--- /dev/null
+++ b/services/audioflinger/FastThread.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FastThread"
+//#define LOG_NDEBUG 0
+
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
+#include "Configuration.h"
+#include <utils/Log.h>
+extern "C" {
+#include "../private/bionic_futex.h"
+}
+#include <utils/Trace.h>
+#include "FastThread.h"
+
+#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
+#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
+#define MIN_WARMUP_CYCLES          2    // minimum number of loop cycles to wait for warmup
+#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
+
+namespace android {
+
+FastThread::FastThread() : Thread(false /*canCallJava*/),
+    // re-initialized to &initial by subclass constructor
+     previous(NULL), current(NULL),
+    /* oldTs({0, 0}), */
+    oldTsValid(false),
+    sleepNs(-1),
+    periodNs(0),
+    underrunNs(0),
+    overrunNs(0),
+    forceNs(0),
+    warmupNs(0),
+    // re-initialized to &dummyDumpState by subclass constructor
+    mDummyDumpState(NULL),
+    dumpState(NULL),
+    ignoreNextOverrun(true),
+#ifdef FAST_MIXER_STATISTICS
+    // oldLoad
+    oldLoadValid(false),
+    bounds(0),
+    full(false),
+    // tcu
+#endif
+    coldGen(0),
+    isWarm(false),
+    /* measuredWarmupTs({0, 0}), */
+    warmupCycles(0),
+    // dummyLogWriter
+    logWriter(&dummyLogWriter),
+    timestampStatus(INVALID_OPERATION),
+
+    command(FastThreadState::INITIAL),
+#if 0
+    frameCount(0),
+#endif
+    attemptedWrite(false)
+{
+    oldTs.tv_sec = 0;
+    oldTs.tv_nsec = 0;
+    measuredWarmupTs.tv_sec = 0;
+    measuredWarmupTs.tv_nsec = 0;
+}
+
+FastThread::~FastThread()
+{
+}
+
+bool FastThread::threadLoop()
+{
+    for (;;) {
+
+        // either nanosleep, sched_yield, or busy wait
+        if (sleepNs >= 0) {
+            if (sleepNs > 0) {
+                ALOG_ASSERT(sleepNs < 1000000000);
+                const struct timespec req = {0, sleepNs};
+                nanosleep(&req, NULL);
+            } else {
+                sched_yield();
+            }
+        }
+        // default to long sleep for next cycle
+        sleepNs = FAST_DEFAULT_NS;
+
+        // poll for state change
+        const FastThreadState *next = poll();
+        if (next == NULL) {
+            // continue to use the default initial state until a real state is available
+            // FIXME &initial not available, should save address earlier
+            //ALOG_ASSERT(current == &initial && previous == &initial);
+            next = current;
+        }
+
+        command = next->mCommand;
+        if (next != current) {
+
+            // As soon as possible after learning of a new dump area, start using it
+            dumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
+            logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter;
+            setLog(logWriter);
+
+            // We want to always have a valid reference to the previous (non-idle) state.
+            // However, the state queue only guarantees access to current and previous states.
+            // So when there is a transition from a non-idle state into an idle state, we make a
+            // copy of the last known non-idle state so it is still available on return from idle.
+            // The possible transitions are:
+            //  non-idle -> non-idle    update previous from current in-place
+            //  non-idle -> idle        update previous from copy of current
+            //  idle     -> idle        don't update previous
+            //  idle     -> non-idle    don't update previous
+            if (!(current->mCommand & FastThreadState::IDLE)) {
+                if (command & FastThreadState::IDLE) {
+                    onIdle();
+                    oldTsValid = false;
+#ifdef FAST_MIXER_STATISTICS
+                    oldLoadValid = false;
+#endif
+                    ignoreNextOverrun = true;
+                }
+                previous = current;
+            }
+            current = next;
+        }
+#if !LOG_NDEBUG
+        next = NULL;    // not referenced again
+#endif
+
+        dumpState->mCommand = command;
+
+        // << current, previous, command, dumpState >>
+
+        switch (command) {
+        case FastThreadState::INITIAL:
+        case FastThreadState::HOT_IDLE:
+            sleepNs = FAST_HOT_IDLE_NS;
+            continue;
+        case FastThreadState::COLD_IDLE:
+            // only perform a cold idle command once
+            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
+            if (current->mColdGen != coldGen) {
+                int32_t *coldFutexAddr = current->mColdFutexAddr;
+                ALOG_ASSERT(coldFutexAddr != NULL);
+                int32_t old = android_atomic_dec(coldFutexAddr);
+                if (old <= 0) {
+                    __futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
+                }
+                int policy = sched_getscheduler(0);
+                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
+                    ALOGE("did not receive expected priority boost");
+                }
+                // This may be overly conservative; there could be times that the normal mixer
+                // requests such a brief cold idle that it doesn't require resetting this flag.
+                isWarm = false;
+                measuredWarmupTs.tv_sec = 0;
+                measuredWarmupTs.tv_nsec = 0;
+                warmupCycles = 0;
+                sleepNs = -1;
+                coldGen = current->mColdGen;
+#ifdef FAST_MIXER_STATISTICS
+                bounds = 0;
+                full = false;
+#endif
+                oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
+                timestampStatus = INVALID_OPERATION;
+            } else {
+                sleepNs = FAST_HOT_IDLE_NS;
+            }
+            continue;
+        case FastThreadState::EXIT:
+            onExit();
+            return false;
+        default:
+            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(command));
+            break;
+        }
+
+        // there is a non-idle state available to us; did the state change?
+        if (current != previous) {
+            onStateChange();
+#if 1   // FIXME shouldn't need this
+            // only process state change once
+            previous = current;
+#endif
+        }
+
+        // do work using current state here
+        attemptedWrite = false;
+        onWork();
+
+        // To be exactly periodic, compute the next sleep time based on current time.
+        // This code doesn't have long-term stability when the sink is non-blocking.
+        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
+        struct timespec newTs;
+        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
+        if (rc == 0) {
+            //logWriter->logTimestamp(newTs);
+            if (oldTsValid) {
+                time_t sec = newTs.tv_sec - oldTs.tv_sec;
+                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
+                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
+                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
+                        oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
+                if (nsec < 0) {
+                    --sec;
+                    nsec += 1000000000;
+                }
+                // To avoid an initial underrun on fast tracks after exiting standby,
+                // do not start pulling data from tracks and mixing until warmup is complete.
+                // Warmup is considered complete after the earlier of:
+                //      MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs
+                //      MAX_WARMUP_CYCLES write() attempts.
+                // This is overly conservative, but to get better accuracy requires a new HAL API.
+                if (!isWarm && attemptedWrite) {
+                    measuredWarmupTs.tv_sec += sec;
+                    measuredWarmupTs.tv_nsec += nsec;
+                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
+                        measuredWarmupTs.tv_sec++;
+                        measuredWarmupTs.tv_nsec -= 1000000000;
+                    }
+                    ++warmupCycles;
+                    if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) ||
+                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
+                        isWarm = true;
+                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
+                        dumpState->mWarmupCycles = warmupCycles;
+                    }
+                }
+                sleepNs = -1;
+                if (isWarm) {
+                    if (sec > 0 || nsec > underrunNs) {
+                        ATRACE_NAME("underrun");
+                        // FIXME only log occasionally
+                        ALOGV("underrun: time since last cycle %d.%03ld sec",
+                                (int) sec, nsec / 1000000L);
+                        dumpState->mUnderruns++;
+                        ignoreNextOverrun = true;
+                    } else if (nsec < overrunNs) {
+                        if (ignoreNextOverrun) {
+                            ignoreNextOverrun = false;
+                        } else {
+                            // FIXME only log occasionally
+                            ALOGV("overrun: time since last cycle %d.%03ld sec",
+                                    (int) sec, nsec / 1000000L);
+                            dumpState->mOverruns++;
+                        }
+                        // This forces a minimum cycle time. It:
+                        //  - compensates for an audio HAL with jitter due to sample rate conversion
+                        //  - works with a variable buffer depth audio HAL that never pulls at a
+                        //    rate < than overrunNs per buffer.
+                        //  - recovers from overrun immediately after underrun
+                        // It doesn't work with a non-blocking audio HAL.
+                        sleepNs = forceNs - nsec;
+                    } else {
+                        ignoreNextOverrun = false;
+                    }
+                }
+#ifdef FAST_MIXER_STATISTICS
+                if (isWarm) {
+                    // advance the FIFO queue bounds
+                    size_t i = bounds & (dumpState->mSamplingN - 1);
+                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
+                    if (full) {
+                        bounds += 0x10000;
+                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
+                        full = true;
+                    }
+                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
+                    uint32_t monotonicNs = nsec;
+                    if (sec > 0 && sec < 4) {
+                        monotonicNs += sec * 1000000000;
+                    }
+                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+                    uint32_t loadNs = 0;
+                    struct timespec newLoad;
+                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
+                    if (rc == 0) {
+                        if (oldLoadValid) {
+                            sec = newLoad.tv_sec - oldLoad.tv_sec;
+                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
+                            if (nsec < 0) {
+                                --sec;
+                                nsec += 1000000000;
+                            }
+                            loadNs = nsec;
+                            if (sec > 0 && sec < 4) {
+                                loadNs += sec * 1000000000;
+                            }
+                        } else {
+                            // first time through the loop
+                            oldLoadValid = true;
+                        }
+                        oldLoad = newLoad;
+                    }
+#ifdef CPU_FREQUENCY_STATISTICS
+                    // get the absolute value of CPU clock frequency in kHz
+                    int cpuNum = sched_getcpu();
+                    uint32_t kHz = tcu.getCpukHz(cpuNum);
+                    kHz = (kHz << 4) | (cpuNum & 0xF);
+#endif
+                    // save values in FIFO queues for dumpsys
+                    // these stores #1, #2, #3 are not atomic with respect to each other,
+                    // or with respect to store #4 below
+                    dumpState->mMonotonicNs[i] = monotonicNs;
+                    dumpState->mLoadNs[i] = loadNs;
+#ifdef CPU_FREQUENCY_STATISTICS
+                    dumpState->mCpukHz[i] = kHz;
+#endif
+                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
+                    // the newest open & oldest closed halves are atomic with respect to each other
+                    dumpState->mBounds = bounds;
+                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
+                    ATRACE_INT("load_us", loadNs / 1000);
+                }
+#endif
+            } else {
+                // first time through the loop
+                oldTsValid = true;
+                sleepNs = periodNs;
+                ignoreNextOverrun = true;
+            }
+            oldTs = newTs;
+        } else {
+            // monotonic clock is broken
+            oldTsValid = false;
+            sleepNs = periodNs;
+        }
+
+    }   // for (;;)
+
+    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
+}
+
+}   // namespace android
diff --git a/services/audioflinger/FastThread.h b/services/audioflinger/FastThread.h
new file mode 100644
index 0000000..1330334
--- /dev/null
+++ b/services/audioflinger/FastThread.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FAST_THREAD_H
+#define ANDROID_AUDIO_FAST_THREAD_H
+
+#include "Configuration.h"
+#ifdef CPU_FREQUENCY_STATISTICS
+#include <cpustats/ThreadCpuUsage.h>
+#endif
+#include <utils/Thread.h>
+#include "FastThreadState.h"
+
+namespace android {
+
+// FastThread is the common abstract base class of FastMixer and FastCapture
+class FastThread : public Thread {
+
+public:
+            FastThread();
+    virtual ~FastThread();
+
+private:
+    // implement Thread::threadLoop()
+    virtual bool threadLoop();
+
+protected:
+    // callouts to subclass in same lexical order as they were in original FastMixer.cpp
+    // FIXME need comments
+    virtual const FastThreadState *poll() = 0;
+    virtual void setLog(NBLog::Writer *logWriter __unused) { }
+    virtual void onIdle() = 0;
+    virtual void onExit() = 0;
+    virtual bool isSubClassCommand(FastThreadState::Command command) = 0;
+    virtual void onStateChange() = 0;
+    virtual void onWork() = 0;
+
+    // FIXME these former local variables need comments and to be renamed to have an "m" prefix
+    const FastThreadState *previous;
+    const FastThreadState *current;
+    struct timespec oldTs;
+    bool oldTsValid;
+    long sleepNs;   // -1: busy wait, 0: sched_yield, > 0: nanosleep
+    long periodNs;      // expected period; the time required to render one mix buffer
+    long underrunNs;    // underrun likely when write cycle is greater than this value
+    long overrunNs;     // overrun likely when write cycle is less than this value
+    long forceNs;       // if overrun detected, force the write cycle to take this much time
+    long warmupNs;      // warmup complete when write cycle is greater than this value
+    FastThreadDumpState *mDummyDumpState;
+    FastThreadDumpState *dumpState;
+    bool ignoreNextOverrun;  // used to ignore initial overrun and first after an underrun
+#ifdef FAST_MIXER_STATISTICS
+    struct timespec oldLoad;    // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+    bool oldLoadValid;  // whether oldLoad is valid
+    uint32_t bounds;
+    bool full;          // whether we have collected at least mSamplingN samples
+#ifdef CPU_FREQUENCY_STATISTICS
+    ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
+#endif
+#endif
+    unsigned coldGen;   // last observed mColdGen
+    bool isWarm;        // true means ready to mix, false means wait for warmup before mixing
+    struct timespec measuredWarmupTs;  // how long did it take for warmup to complete
+    uint32_t warmupCycles;  // counter of number of loop cycles required to warmup
+    NBLog::Writer dummyLogWriter;
+    NBLog::Writer *logWriter;
+    status_t timestampStatus;
+
+    FastThreadState::Command command;
+#if 0
+    size_t frameCount;
+#endif
+    bool attemptedWrite;
+
+};  // class FastThread
+
+}   // namespace android
+
+#endif  // ANDROID_AUDIO_FAST_THREAD_H
diff --git a/services/audioflinger/FastThreadState.cpp b/services/audioflinger/FastThreadState.cpp
new file mode 100644
index 0000000..d4d6255
--- /dev/null
+++ b/services/audioflinger/FastThreadState.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Configuration.h"
+#include "FastThreadState.h"
+
+namespace android {
+
+FastThreadState::FastThreadState() :
+    mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0), mDumpState(NULL), mNBLogWriter(NULL)
+
+{
+}
+
+FastThreadState::~FastThreadState()
+{
+}
+
+
+FastThreadDumpState::FastThreadDumpState() :
+    mCommand(FastThreadState::INITIAL), mUnderruns(0), mOverruns(0),
+    /* mMeasuredWarmupTs({0, 0}), */
+    mWarmupCycles(0)
+#ifdef FAST_MIXER_STATISTICS
+    , mSamplingN(0), mBounds(0)
+#endif
+{
+    mMeasuredWarmupTs.tv_sec = 0;
+    mMeasuredWarmupTs.tv_nsec = 0;
+}
+
+FastThreadDumpState::~FastThreadDumpState()
+{
+}
+
+}   // namespace android
diff --git a/services/audioflinger/FastThreadState.h b/services/audioflinger/FastThreadState.h
new file mode 100644
index 0000000..1ab8a0a
--- /dev/null
+++ b/services/audioflinger/FastThreadState.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FAST_THREAD_STATE_H
+#define ANDROID_AUDIO_FAST_THREAD_STATE_H
+
+#include "Configuration.h"
+#include <stdint.h>
+#include <media/nbaio/NBLog.h>
+
+namespace android {
+
+struct FastThreadDumpState;
+
+// Represents a single state of a FastThread
+struct FastThreadState {
+                FastThreadState();
+    /*virtual*/ ~FastThreadState();
+
+    typedef uint32_t Command;
+    static const Command
+        INITIAL = 0,            // used only for the initial state
+        HOT_IDLE = 1,           // do nothing
+        COLD_IDLE = 2,          // wait for the futex
+        IDLE = 3,               // either HOT_IDLE or COLD_IDLE
+        EXIT = 4;               // exit from thread
+        // additional values defined per subclass
+    Command     mCommand;       // current command
+    int32_t*    mColdFutexAddr; // for COLD_IDLE only, pointer to the associated futex
+    unsigned    mColdGen;       // increment when COLD_IDLE is requested so it's only performed once
+
+    // This might be a one-time configuration rather than per-state
+    FastThreadDumpState* mDumpState; // if non-NULL, then update dump state periodically
+    NBLog::Writer* mNBLogWriter; // non-blocking logger
+
+};  // struct FastThreadState
+
+
+// FIXME extract common part of comment at FastMixerDumpState
+struct FastThreadDumpState {
+    FastThreadDumpState();
+    /*virtual*/ ~FastThreadDumpState();
+
+    FastThreadState::Command mCommand;   // current command
+    uint32_t mUnderruns;        // total number of underruns
+    uint32_t mOverruns;         // total number of overruns
+    struct timespec mMeasuredWarmupTs;  // measured warmup time
+    uint32_t mWarmupCycles;     // number of loop cycles required to warmup
+
+#ifdef FAST_MIXER_STATISTICS
+    // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency.
+    // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000.
+    // The sample arrays are virtually allocated based on this compile-time constant,
+    // but are only initialized and used based on the runtime parameter mSamplingN.
+    static const uint32_t kSamplingN = 0x8000;
+    // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN.
+    uint32_t mSamplingN;
+    // The bounds define the interval of valid samples, and are represented as follows:
+    //      newest open (excluded) endpoint   = lower 16 bits of bounds, modulo N
+    //      oldest closed (included) endpoint = upper 16 bits of bounds, modulo N
+    // Number of valid samples is newest - oldest.
+    uint32_t mBounds;                   // bounds for mMonotonicNs, mThreadCpuNs, and mCpukHz
+    // The elements in the *Ns arrays are in units of nanoseconds <= 3999999999.
+    uint32_t mMonotonicNs[kSamplingN];  // delta monotonic (wall clock) time
+    uint32_t mLoadNs[kSamplingN];       // delta CPU load in time
+#ifdef CPU_FREQUENCY_STATISTICS
+    uint32_t mCpukHz[kSamplingN];       // absolute CPU clock frequency in kHz, bits 0-3 are CPU#
+#endif
+#endif
+
+};  // struct FastThreadDumpState
+
+}   // android
+
+#endif  // ANDROID_AUDIO_FAST_THREAD_STATE_H
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 43b77f3..e9c6834 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -34,9 +34,10 @@
                                 int uid,
                                 IAudioFlinger::track_flags_t flags);
     virtual             ~Track();
+    virtual status_t    initCheck() const;
 
     static  void        appendDumpHeader(String8& result);
-            void        dump(char* buffer, size_t size);
+            void        dump(char* buffer, size_t size, bool active);
     virtual status_t    start(AudioSystem::sync_event_t event =
                                     AudioSystem::SYNC_EVENT_NONE,
                              int triggerSession = 0);
@@ -93,6 +94,10 @@
     bool isReady() const;
     void setPaused() { mState = PAUSED; }
     void reset();
+    bool isFlushPending() const { return mFlushHwPending; }
+    void flushAck();
+    bool isResumePending();
+    void resumeAck();
 
     bool isOutputTrack() const {
         return (mStreamType == AUDIO_STREAM_CNT);
@@ -154,6 +159,7 @@
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
     AudioTrackServerProxy*  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
+    bool                mFlushHwPending; // track requests for thread flush
 };  // end of Track
 
 class TimedTrack : public Track {
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 57de568..6fc06d8 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -45,7 +45,10 @@
                                                 return tmp; }
 
     static  void        appendDumpHeader(String8& result);
-            void        dump(char* buffer, size_t size);
+            void        dump(char* buffer, size_t size, bool active);
+
+            void        handleSyncStartEvent(const sp<SyncEvent>& event);
+            void        clearSyncStartEvent();
 
 private:
     friend class AudioFlinger;  // for mState
@@ -59,5 +62,33 @@
     // releaseBuffer() not overridden
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
-    AudioRecordServerProxy* mAudioRecordServerProxy;
+
+           // updated by RecordThread::readInputParameters_l()
+            AudioResampler                      *mResampler;
+
+            // interleaved stereo pairs of fixed-point Q4.27
+            int32_t                             *mRsmpOutBuffer;
+            // current allocated frame count for the above, which may be larger than needed
+            size_t                              mRsmpOutFrameCount;
+
+            size_t                              mRsmpInUnrel;   // unreleased frames remaining from
+                                                                // most recent getNextBuffer
+                                                                // for debug only
+
+            // rolling counter that is never cleared
+            int32_t                             mRsmpInFront;   // next available frame
+
+            AudioBufferProvider::Buffer mSink;  // references client's buffer sink in shared memory
+
+            // sync event triggering actual audio capture. Frames read before this event will
+            // be dropped and therefore not read by the application.
+            sp<SyncEvent>                       mSyncStartEvent;
+
+            // number of captured frames to drop after the start sync event has been received.
+            // when < 0, maximum frames to drop before starting capture even if sync event is
+            // not received
+            ssize_t                             mFramesToDrop;
+
+            // used by resampler to find source frames
+            ResamplerBufferProvider *mResamplerBufferProvider;
 };
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index cac785a..ae3dd8b 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -34,6 +34,7 @@
 #include <audio_effects/effect_ns.h>
 #include <audio_effects/effect_aec.h>
 #include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
 
 // NBAIO implementations
 #include <media/nbaio/AudioStreamOutSink.h>
@@ -104,10 +105,10 @@
 // maximum divider applied to the active sleep time in the mixer thread loop
 static const uint32_t kMaxThreadSleepTimeShift = 2;
 
-// minimum normal mix buffer size, expressed in milliseconds rather than frames
-static const uint32_t kMinNormalMixBufferSizeMs = 20;
-// maximum normal mix buffer size
-static const uint32_t kMaxNormalMixBufferSizeMs = 24;
+// minimum normal sink buffer size, expressed in milliseconds rather than frames
+static const uint32_t kMinNormalSinkBufferSizeMs = 20;
+// maximum normal sink buffer size
+static const uint32_t kMaxNormalSinkBufferSizeMs = 24;
 
 // Offloaded output thread standby delay: allows track transition without going to standby
 static const nsecs_t kOffloadStandbyDelayNs = seconds(1);
@@ -185,7 +186,11 @@
 {
 }
 
-void CpuStats::sample(const String8 &title) {
+void CpuStats::sample(const String8 &title
+#ifndef DEBUG_CPU_USAGE
+                __unused
+#endif
+        ) {
 #ifdef DEBUG_CPU_USAGE
     // get current thread's delta CPU time in wall clock ns
     double wcNs;
@@ -269,8 +274,9 @@
     :   Thread(false /*canCallJava*/),
         mType(type),
         mAudioFlinger(audioFlinger),
-        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
-        // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
+        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
+        // are set by PlaybackThread::readOutputParameters_l() or
+        // RecordThread::readInputParameters_l()
         mParamStatus(NO_ERROR),
         //FIXME: mStandby should be true here. Is this some kind of hack?
         mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
@@ -297,6 +303,17 @@
     }
 }
 
+status_t AudioFlinger::ThreadBase::readyToRun()
+{
+    status_t status = initCheck();
+    if (status == NO_ERROR) {
+        ALOGI("AudioFlinger's thread %p ready to run", this);
+    } else {
+        ALOGE("No working audio driver found.");
+    }
+    return status;
+}
+
 void AudioFlinger::ThreadBase::exit()
 {
     ALOGV("ThreadBase::exit");
@@ -369,7 +386,13 @@
 
 void AudioFlinger::ThreadBase::processConfigEvents()
 {
-    mLock.lock();
+    Mutex::Autolock _l(mLock);
+    processConfigEvents_l();
+}
+
+// post condition: mConfigEvents.isEmpty()
+void AudioFlinger::ThreadBase::processConfigEvents_l()
+{
     while (!mConfigEvents.isEmpty()) {
         ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
         ConfigEvent *event = mConfigEvents[0];
@@ -377,35 +400,81 @@
         // release mLock before locking AudioFlinger mLock: lock order is always
         // AudioFlinger then ThreadBase to avoid cross deadlock
         mLock.unlock();
-        switch(event->type()) {
-            case CFG_EVENT_PRIO: {
-                PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
-                // FIXME Need to understand why this has be done asynchronously
-                int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
-                        true /*asynchronous*/);
-                if (err != 0) {
-                    ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
-                          "error %d",
-                          prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
-                }
-            } break;
-            case CFG_EVENT_IO: {
-                IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
-                mAudioFlinger->mLock.lock();
+        switch (event->type()) {
+        case CFG_EVENT_PRIO: {
+            PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
+            // FIXME Need to understand why this has to be done asynchronously
+            int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
+                    true /*asynchronous*/);
+            if (err != 0) {
+                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
+                      prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
+            }
+        } break;
+        case CFG_EVENT_IO: {
+            IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
+            {
+                Mutex::Autolock _l(mAudioFlinger->mLock);
                 audioConfigChanged_l(ioEvent->event(), ioEvent->param());
-                mAudioFlinger->mLock.unlock();
-            } break;
-            default:
-                ALOGE("processConfigEvents() unknown event type %d", event->type());
-                break;
+            }
+        } break;
+        default:
+            ALOGE("processConfigEvents() unknown event type %d", event->type());
+            break;
         }
         delete event;
         mLock.lock();
     }
-    mLock.unlock();
 }
 
-void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
+String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
+    String8 s;
+    if (output) {
+        if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
+        if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
+        if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
+        if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+        if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
+        if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
+        if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
+        if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
+        if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
+        if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
+        if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
+        if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
+        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
+        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
+        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
+        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
+        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
+        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+        if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
+    } else {
+        if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
+        if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
+        if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
+        if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
+        if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
+        if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
+        if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
+        if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
+        if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
+        if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
+        if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
+        if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+        if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
+        if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
+        if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown,  ");
+    }
+    int len = s.length();
+    if (s.length() > 2) {
+        char *str = s.lockBuffer(len);
+        s.unlockBuffer(len - 2);
+    }
+    return s;
+}
+
+void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -413,47 +482,43 @@
 
     bool locked = AudioFlinger::dumpTryLock(mLock);
     if (!locked) {
-        snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this);
-        write(fd, buffer, strlen(buffer));
+        fdprintf(fd, "thread %p maybe dead locked\n", this);
     }
 
-    snprintf(buffer, SIZE, "io handle: %d\n", mId);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "TID: %d\n", getTid());
-    result.append(buffer);
-    snprintf(buffer, SIZE, "standby: %d\n", mStandby);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Sample rate: %u\n", mSampleRate);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "HAL frame count: %zu\n", mFrameCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Format: %d\n", mFormat);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Frame size: %zu\n", mFrameSize);
-    result.append(buffer);
-
-    snprintf(buffer, SIZE, "\nPending setParameters commands: \n");
-    result.append(buffer);
-    result.append(" Index Command");
-    for (size_t i = 0; i < mNewParameters.size(); ++i) {
-        snprintf(buffer, SIZE, "\n %02zu    ", i);
-        result.append(buffer);
-        result.append(mNewParameters[i]);
+    fdprintf(fd, "  I/O handle: %d\n", mId);
+    fdprintf(fd, "  TID: %d\n", getTid());
+    fdprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
+    fdprintf(fd, "  Sample rate: %u\n", mSampleRate);
+    fdprintf(fd, "  HAL frame count: %zu\n", mFrameCount);
+    fdprintf(fd, "  HAL buffer size: %u bytes\n", mBufferSize);
+    fdprintf(fd, "  Channel Count: %u\n", mChannelCount);
+    fdprintf(fd, "  Channel Mask: 0x%08x (%s)\n", mChannelMask,
+            channelMaskToString(mChannelMask, mType != RECORD).string());
+    fdprintf(fd, "  Format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+    fdprintf(fd, "  Frame size: %zu\n", mFrameSize);
+    fdprintf(fd, "  Pending setParameters commands:");
+    size_t numParams = mNewParameters.size();
+    if (numParams) {
+        fdprintf(fd, "\n   Index Command");
+        for (size_t i = 0; i < numParams; ++i) {
+            fdprintf(fd, "\n   %02zu    ", i);
+            fdprintf(fd, mNewParameters[i]);
+        }
+        fdprintf(fd, "\n");
+    } else {
+        fdprintf(fd, " none\n");
     }
-
-    snprintf(buffer, SIZE, "\n\nPending config events: \n");
-    result.append(buffer);
-    for (size_t i = 0; i < mConfigEvents.size(); i++) {
-        mConfigEvents[i]->dump(buffer, SIZE);
-        result.append(buffer);
+    fdprintf(fd, "  Pending config events:");
+    size_t numConfig = mConfigEvents.size();
+    if (numConfig) {
+        for (size_t i = 0; i < numConfig; i++) {
+            mConfigEvents[i]->dump(buffer, SIZE);
+            fdprintf(fd, "\n    %s", buffer);
+        }
+        fdprintf(fd, "\n");
+    } else {
+        fdprintf(fd, " none\n");
     }
-    result.append("\n");
-
-    write(fd, result.string(), result.size());
 
     if (locked) {
         mLock.unlock();
@@ -466,10 +531,11 @@
     char buffer[SIZE];
     String8 result;
 
-    snprintf(buffer, SIZE, "\n- %zu Effect Chains:\n", mEffectChains.size());
+    size_t numEffectChains = mEffectChains.size();
+    snprintf(buffer, SIZE, "  %zu Effect Chains\n", numEffectChains);
     write(fd, buffer, strlen(buffer));
 
-    for (size_t i = 0; i < mEffectChains.size(); ++i) {
+    for (size_t i = 0; i < numEffectChains; ++i) {
         sp<EffectChain> chain = mEffectChains[i];
         if (chain != 0) {
             chain->dump(fd, args);
@@ -586,7 +652,7 @@
     mPowerManager.clear();
 }
 
-void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who)
+void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused)
 {
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
@@ -739,8 +805,7 @@
         int sessionId,
         effect_descriptor_t *desc,
         int *enabled,
-        status_t *status
-        )
+        status_t *status)
 {
     sp<EffectModule> effect;
     sp<EffectHandle> handle;
@@ -756,6 +821,15 @@
         goto Exit;
     }
 
+    // Reject any effect on Direct output threads for now, since the format of
+    // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
+    if (mType == DIRECT) {
+        ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s",
+                desc->name, mName);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     // Allow global effects only on offloaded and mixer threads
     if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
         switch (mType) {
@@ -829,7 +903,10 @@
         }
         // create effect handle and connect it to effect module
         handle = new EffectHandle(effect, client, effectClient, priority);
-        lStatus = effect->addHandle(handle.get());
+        lStatus = handle->initCheck();
+        if (lStatus == OK) {
+            lStatus = effect->addHandle(handle.get());
+        }
         if (enabled != NULL) {
             *enabled = (int)effect->isEnabled();
         }
@@ -850,9 +927,7 @@
         handle.clear();
     }
 
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return handle;
 }
 
@@ -1001,8 +1076,18 @@
                                              audio_devices_t device,
                                              type_t type)
     :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
-        mNormalFrameCount(0), mMixBuffer(NULL),
-        mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        mNormalFrameCount(0), mSinkBuffer(NULL),
+        mMixerBufferEnabled(false),
+        mMixerBuffer(NULL),
+        mMixerBufferSize(0),
+        mMixerBufferFormat(AUDIO_FORMAT_INVALID),
+        mMixerBufferValid(false),
+        mEffectBufferEnabled(false),
+        mEffectBuffer(NULL),
+        mEffectBufferSize(0),
+        mEffectBufferFormat(AUDIO_FORMAT_INVALID),
+        mEffectBufferValid(false),
+        mSuspended(0), mBytesWritten(0),
         mActiveTracksGeneration(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
@@ -1044,11 +1129,11 @@
         }
     }
 
-    readOutputParameters();
+    readOutputParameters_l();
 
     // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
     // There is no AUDIO_STREAM_MIN, and ++ operator does not compile
-    for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
+    for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_CNT;
             stream = (audio_stream_type_t) (stream + 1)) {
         mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
         mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
@@ -1060,7 +1145,9 @@
 AudioFlinger::PlaybackThread::~PlaybackThread()
 {
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete [] mAllocMixBuffer;
+    free(mSinkBuffer);
+    free(mMixerBuffer);
+    free(mEffectBuffer);
 }
 
 void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
@@ -1070,13 +1157,13 @@
     dumpEffectChains(fd, args);
 }
 
-void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args)
+void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
 
-    result.appendFormat("Output thread %p stream volumes in dB:\n    ", this);
+    result.appendFormat("  Stream volumes in dB: ");
     for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
         const stream_type_t *st = &mStreamTypes[i];
         if (i > 0) {
@@ -1091,75 +1178,69 @@
     write(fd, result.string(), result.length());
     result.clear();
 
-    snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
-    result.append(buffer);
-    Track::appendDumpHeader(result);
-    for (size_t i = 0; i < mTracks.size(); ++i) {
-        sp<Track> track = mTracks[i];
-        if (track != 0) {
-            track->dump(buffer, SIZE);
-            result.append(buffer);
-        }
-    }
-
-    snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
-    result.append(buffer);
-    Track::appendDumpHeader(result);
-    for (size_t i = 0; i < mActiveTracks.size(); ++i) {
-        sp<Track> track = mActiveTracks[i].promote();
-        if (track != 0) {
-            track->dump(buffer, SIZE);
-            result.append(buffer);
-        }
-    }
-    write(fd, result.string(), result.size());
-
     // These values are "raw"; they will wrap around.  See prepareTracks_l() for a better way.
     FastTrackUnderruns underruns = getFastTrackUnderruns(0);
-    fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n",
+    fdprintf(fd, "  Normal mixer raw underrun counters: partial=%u empty=%u\n",
             underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
+
+    size_t numtracks = mTracks.size();
+    size_t numactive = mActiveTracks.size();
+    fdprintf(fd, "  %d Tracks", numtracks);
+    size_t numactiveseen = 0;
+    if (numtracks) {
+        fdprintf(fd, " of which %d are active\n", numactive);
+        Track::appendDumpHeader(result);
+        for (size_t i = 0; i < numtracks; ++i) {
+            sp<Track> track = mTracks[i];
+            if (track != 0) {
+                bool active = mActiveTracks.indexOf(track) >= 0;
+                if (active) {
+                    numactiveseen++;
+                }
+                track->dump(buffer, SIZE, active);
+                result.append(buffer);
+            }
+        }
+    } else {
+        result.append("\n");
+    }
+    if (numactiveseen != numactive) {
+        // some tracks in the active list were not in the tracks list
+        snprintf(buffer, SIZE, "  The following tracks are in the active list but"
+                " not in the track list\n");
+        result.append(buffer);
+        Track::appendDumpHeader(result);
+        for (size_t i = 0; i < numactive; ++i) {
+            sp<Track> track = mActiveTracks[i].promote();
+            if (track != 0 && mTracks.indexOf(track) < 0) {
+                track->dump(buffer, SIZE, true);
+                result.append(buffer);
+            }
+        }
+    }
+
+    write(fd, result.string(), result.size());
+
 }
 
 void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Normal frame count: %zu\n", mNormalFrameCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
-            ns2ms(systemTime() - mLastWriteTime));
-    result.append(buffer);
-    snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer);
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-    fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask);
+    fdprintf(fd, "\nOutput thread %p:\n", this);
+    fdprintf(fd, "  Normal frame count: %zu\n", mNormalFrameCount);
+    fdprintf(fd, "  Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
+    fdprintf(fd, "  Total writes: %d\n", mNumWrites);
+    fdprintf(fd, "  Delayed writes: %d\n", mNumDelayedWrites);
+    fdprintf(fd, "  Blocked in write: %s\n", mInWrite ? "yes" : "no");
+    fdprintf(fd, "  Suspend count: %d\n", mSuspended);
+    fdprintf(fd, "  Sink buffer : %p\n", mSinkBuffer);
+    fdprintf(fd, "  Mixer buffer: %p\n", mMixerBuffer);
+    fdprintf(fd, "  Effect buffer: %p\n", mEffectBuffer);
+    fdprintf(fd, "  Fast track availMask=%#x\n", mFastTrackAvailMask);
 
     dumpBase(fd, args);
 }
 
 // Thread virtuals
-status_t AudioFlinger::PlaybackThread::readyToRun()
-{
-    status_t status = initCheck();
-    if (status == NO_ERROR) {
-        ALOGI("AudioFlinger's thread %p ready to run", this);
-    } else {
-        ALOGE("No working audio driver found.");
-    }
-    return status;
-}
 
 void AudioFlinger::PlaybackThread::onFirstRef()
 {
@@ -1182,7 +1263,7 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        size_t frameCount,
+        size_t *pFrameCount,
         const sp<IMemory>& sharedBuffer,
         int sessionId,
         IAudioFlinger::track_flags_t *flags,
@@ -1190,6 +1271,7 @@
         int uid,
         status_t *status)
 {
+    size_t frameCount = *pFrameCount;
     sp<Track> track;
     status_t lStatus;
 
@@ -1256,29 +1338,36 @@
         }
       }
     }
+    *pFrameCount = frameCount;
 
-    if (mType == DIRECT) {
+    switch (mType) {
+
+    case DIRECT:
         if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
             if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
-                ALOGE("createTrack_l() Bad parameter: sampleRate %u format %d, channelMask 0x%08x "
-                        "for output %p with format %d",
+                ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
+                        "for output %p with format %#x",
                         sampleRate, format, channelMask, mOutput, mFormat);
                 lStatus = BAD_VALUE;
                 goto Exit;
             }
         }
-    } else if (mType == OFFLOAD) {
+        break;
+
+    case OFFLOAD:
         if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
-            ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
-                    "for output %p with format %d",
+            ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \""
+                    "for output %p with format %#x",
                     sampleRate, format, channelMask, mOutput, mFormat);
             lStatus = BAD_VALUE;
             goto Exit;
         }
-    } else {
+        break;
+
+    default:
         if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) {
-                ALOGE("createTrack_l() Bad parameter: format %d \""
-                        "for output %p with format %d",
+                ALOGE("createTrack_l() Bad parameter: format %#x \""
+                        "for output %p with format %#x",
                         format, mOutput, mFormat);
                 lStatus = BAD_VALUE;
                 goto Exit;
@@ -1289,11 +1378,13 @@
             lStatus = BAD_VALUE;
             goto Exit;
         }
+        break;
+
     }
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
-        ALOGE("Audio driver not initialized.");
+        ALOGE("createTrack_l() audio driver not initialized");
         goto Exit;
     }
 
@@ -1325,12 +1416,14 @@
                     channelMask, frameCount, sharedBuffer, sessionId, uid);
         }
 
-        if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
-            lStatus = NO_MEMORY;
+        // new Track always returns non-NULL,
+        // but TimedTrack::create() is a factory that could fail by returning NULL
+        lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
+        if (lStatus != NO_ERROR) {
+            ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
             // track must be cleared from the caller as the caller has the AF lock
             goto Exit;
         }
-
         mTracks.add(track);
 
         sp<EffectChain> chain = getEffectChain_l(sessionId);
@@ -1352,9 +1445,7 @@
     lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return track;
 }
 
@@ -1473,9 +1564,7 @@
         status = NO_ERROR;
     }
 
-    ALOGV("signal playback thread");
-    broadcast_l();
-
+    onAddNewTrack_l();
     return status;
 }
 
@@ -1601,7 +1690,7 @@
 
 // static
 int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
-                                                void *param,
+                                                void *param __unused,
                                                 void *cookie)
 {
     AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
@@ -1620,29 +1709,30 @@
     return 0;
 }
 
-void AudioFlinger::PlaybackThread::readOutputParameters()
+void AudioFlinger::PlaybackThread::readOutputParameters_l()
 {
-    // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
+    // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
     mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
     mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
     if (!audio_is_output_channel(mChannelMask)) {
-        LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
+        LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
     if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
-        LOG_FATAL("HAL channel mask %#x not supported for mixed output; "
+        LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output; "
                 "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
     }
     mChannelCount = popcount(mChannelMask);
     mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
     if (!audio_is_valid_format(mFormat)) {
-        LOG_FATAL("HAL format %d not valid for output", mFormat);
+        LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
     }
     if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) {
-        LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT",
-                mFormat);
+        LOG_ALWAYS_FATAL("HAL format %#x not supported for mixed output; "
+                "must be AUDIO_FORMAT_PCM_16_BIT", mFormat);
     }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
-    mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
+    mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
+    mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
         ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
                 mFrameCount);
@@ -1657,12 +1747,12 @@
         }
     }
 
-    // Calculate size of normal mix buffer relative to the HAL output buffer size
+    // Calculate size of normal sink buffer relative to the HAL output buffer size
     double multiplier = 1.0;
     if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
             kUseFastMixer == FastMixer_Dynamic)) {
-        size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000;
-        size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000;
+        size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000;
+        size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000;
         // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
         minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
         maxNormalFrameCount = maxNormalFrameCount & ~15;
@@ -1680,7 +1770,7 @@
             }
         } else {
             // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL
-            // SRC (it would be unusual for the normal mix buffer size to not be a multiple of fast
+            // SRC (it would be unusual for the normal sink buffer size to not be a multiple of fast
             // track, but we sometimes have to do this to satisfy the maximum frame count
             // constraint)
             // FIXME this rounding up should not be done if no HAL SRC
@@ -1696,18 +1786,40 @@
     mNormalFrameCount = multiplier * mFrameCount;
     // round up to nearest 16 frames to satisfy AudioMixer
     mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
-    ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
+    ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
-    delete[] mAllocMixBuffer;
-    size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
-    mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
-    mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
-    memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
+    // mSinkBuffer is the sink buffer.  Size is always multiple-of-16 frames.
+    // Originally this was int16_t[] array, need to remove legacy implications.
+    free(mSinkBuffer);
+    mSinkBuffer = NULL;
+    // For sink buffer size, we use the frame size from the downstream sink to avoid problems
+    // with non PCM formats for compressed music, e.g. AAC, and Offload threads.
+    const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
+    (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
+
+    // We resize the mMixerBuffer according to the requirements of the sink buffer which
+    // drives the output.
+    free(mMixerBuffer);
+    mMixerBuffer = NULL;
+    if (mMixerBufferEnabled) {
+        mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // also valid: AUDIO_FORMAT_PCM_16_BIT.
+        mMixerBufferSize = mNormalFrameCount * mChannelCount
+                * audio_bytes_per_sample(mMixerBufferFormat);
+        (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
+    }
+    free(mEffectBuffer);
+    mEffectBuffer = NULL;
+    if (mEffectBufferEnabled) {
+        mEffectBufferFormat = AUDIO_FORMAT_PCM_16_BIT; // Note: Effects support 16b only
+        mEffectBufferSize = mNormalFrameCount * mChannelCount
+                * audio_bytes_per_sample(mEffectBufferFormat);
+        (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
+    }
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
-    // Note that mLock is not held when readOutputParameters() is called from the constructor
+    // Note that mLock is not held when readOutputParameters_l() is called from the constructor
     // but in this case nothing is done below as no audio sessions have effect yet so it doesn't
     // matter.
     // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
@@ -1841,7 +1953,7 @@
         const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (count) {
+    if (count > 0) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             if (!track->isOutputTrack()) {
@@ -1882,12 +1994,12 @@
     mLastWriteTime = systemTime();
     mInWrite = true;
     ssize_t bytesWritten;
+    const size_t offset = mCurrentWriteLength - mBytesRemaining;
 
     // If an NBAIO sink is present, use it to write the normal mixer's submix
     if (mNormalSink != 0) {
-#define mBitShift 2 // FIXME
-        size_t count = mBytesRemaining >> mBitShift;
-        size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1;
+        const size_t count = mBytesRemaining / mFrameSize;
+
         ATRACE_BEGIN("write");
         // update the setpoint when AudioFlinger::mScreenState changes
         uint32_t screenState = AudioFlinger::mScreenState;
@@ -1899,10 +2011,10 @@
                         (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
             }
         }
-        ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count);
+        ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
         ATRACE_END();
         if (framesWritten > 0) {
-            bytesWritten = framesWritten << mBitShift;
+            bytesWritten = framesWritten * mFrameSize;
         } else {
             bytesWritten = framesWritten;
         }
@@ -1917,7 +2029,7 @@
     // otherwise use the HAL / AudioStreamOut directly
     } else {
         // Direct output and offload threads
-        size_t offset = (mCurrentWriteLength - mBytesRemaining);
+
         if (mUseAsyncWrite) {
             ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request");
             mWriteAckSequence += 2;
@@ -1928,7 +2040,7 @@
         // FIXME We should have an implementation of timestamps for direct output threads.
         // They are used e.g for multichannel PCM playback over HDMI.
         bytesWritten = mOutput->stream->write(mOutput->stream,
-                                                   (char *)mMixBuffer + offset, mBytesRemaining);
+                                                   (char *)mSinkBuffer + offset, mBytesRemaining);
         if (mUseAsyncWrite &&
                 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
             // do not wait for async callback in case of error of full write
@@ -1967,7 +2079,7 @@
 
 /*
 The derived values that are cached:
- - mixBufferSize from frame count * frame size
+ - mSinkBufferSize from frame count * frame size
  - activeSleepTime from activeSleepTimeUs()
  - idleSleepTime from idleSleepTimeUs()
  - standbyDelay from mActiveSleepTimeUs (DIRECT only)
@@ -1986,7 +2098,7 @@
 
 void AudioFlinger::PlaybackThread::cacheParameters_l()
 {
-    mixBufferSize = mNormalFrameCount * mFrameSize;
+    mSinkBufferSize = mNormalFrameCount * mFrameSize;
     activeSleepTime = activeSleepTimeUs();
     idleSleepTime = idleSleepTimeUs();
 }
@@ -2009,13 +2121,14 @@
 status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
 {
     int session = chain->sessionId();
-    int16_t *buffer = mMixBuffer;
+    int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled
+            ? mEffectBuffer : mSinkBuffer);
     bool ownsBuffer = false;
 
     ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
     if (session > 0) {
         // Only one effect chain can be present in direct output thread and it uses
-        // the mix buffer as input
+        // the sink buffer as input
         if (mType != DIRECT) {
             size_t numSamples = mNormalFrameCount * mChannelCount;
             buffer = new int16_t[numSamples];
@@ -2049,7 +2162,8 @@
     }
 
     chain->setInBuffer(buffer, ownsBuffer);
-    chain->setOutBuffer(mMixBuffer);
+    chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled
+            ? mEffectBuffer : mSinkBuffer));
     // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
     // chains list in order to be processed last as it contains output stage effects
     // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
@@ -2099,7 +2213,7 @@
             for (size_t i = 0; i < mTracks.size(); ++i) {
                 sp<Track> track = mTracks[i];
                 if (session == track->sessionId()) {
-                    track->setMainBuffer(mMixBuffer);
+                    track->setMainBuffer(reinterpret_cast<int16_t*>(mSinkBuffer));
                     chain->decTrackCnt();
                 }
             }
@@ -2302,14 +2416,32 @@
                 // must be written to HAL
                 threadLoop_sleepTime();
                 if (sleepTime == 0) {
-                    mCurrentWriteLength = mixBufferSize;
+                    mCurrentWriteLength = mSinkBufferSize;
                 }
             }
+            // Either threadLoop_mix() or threadLoop_sleepTime() should have set
+            // mMixerBuffer with data if mMixerBufferValid is true and sleepTime == 0.
+            // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid)
+            // or mSinkBuffer (if there are no effects).
+            //
+            // This is done pre-effects computation; if effects change to
+            // support higher precision, this needs to move.
+            //
+            // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
+            // TODO use sleepTime == 0 as an additional condition.
+            if (mMixerBufferValid) {
+                void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
+                audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
+
+                memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
+                        mNormalFrameCount * mChannelCount);
+            }
+
             mBytesRemaining = mCurrentWriteLength;
             if (isSuspended()) {
                 sleepTime = suspendSleepTimeUs();
                 // simulate write to HAL when suspended
-                mBytesWritten += mixBufferSize;
+                mBytesWritten += mSinkBufferSize;
                 mBytesRemaining = 0;
             }
 
@@ -2330,6 +2462,16 @@
             }
         }
 
+        // Only if the Effects buffer is enabled and there is data in the
+        // Effects buffer (buffer valid), we need to
+        // copy into the sink buffer.
+        // TODO use sleepTime == 0 as an additional condition.
+        if (mEffectBufferValid) {
+            //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
+            memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
+                    mNormalFrameCount * mChannelCount);
+        }
+
         // enable changes in effect chain
         unlockEffectChains(effectChains);
 
@@ -2348,20 +2490,20 @@
                         (mMixerStatus == MIXER_DRAIN_ALL)) {
                     threadLoop_drain();
                 }
-if (mType == MIXER) {
-                // write blocked detection
-                nsecs_t now = systemTime();
-                nsecs_t delta = now - mLastWriteTime;
-                if (!mStandby && delta > maxPeriod) {
-                    mNumDelayedWrites++;
-                    if ((now - lastWarning) > kWarningThrottleNs) {
-                        ATRACE_NAME("underrun");
-                        ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
-                                ns2ms(delta), mNumDelayedWrites, this);
-                        lastWarning = now;
+                if (mType == MIXER) {
+                    // write blocked detection
+                    nsecs_t now = systemTime();
+                    nsecs_t delta = now - mLastWriteTime;
+                    if (!mStandby && delta > maxPeriod) {
+                        mNumDelayedWrites++;
+                        if ((now - lastWarning) > kWarningThrottleNs) {
+                            ATRACE_NAME("underrun");
+                            ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
+                                    ns2ms(delta), mNumDelayedWrites, this);
+                            lastWarning = now;
+                        }
                     }
                 }
-}
 
             } else {
                 usleep(sleepTime);
@@ -2409,7 +2551,7 @@
 void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (count) {
+    if (count > 0) {
         for (size_t i=0 ; i<count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             mActiveTracks.remove(track);
@@ -2473,7 +2615,7 @@
     // create an NBAIO sink for the HAL output stream, and negotiate
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
-    const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)};
+    const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
     ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
     ALOG_ASSERT(index == 0);
 
@@ -2713,12 +2855,6 @@
     PlaybackThread::threadLoop_standby();
 }
 
-// Empty implementation for standard mixer
-// Overridden for offloaded playback
-void AudioFlinger::PlaybackThread::flushOutput_l()
-{
-}
-
 bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l()
 {
     return false;
@@ -2750,6 +2886,12 @@
     }
 }
 
+void AudioFlinger::PlaybackThread::onAddNewTrack_l()
+{
+    ALOGV("signal playback thread");
+    broadcast_l();
+}
+
 void AudioFlinger::MixerThread::threadLoop_mix()
 {
     // obtain the presentation timestamp of the next output buffer
@@ -2768,7 +2910,7 @@
 
     // mix buffers...
     mAudioMixer->process(pts);
-    mCurrentWriteLength = mixBufferSize;
+    mCurrentWriteLength = mSinkBufferSize;
     // increase sleep time progressively when application underrun condition clears.
     // Only increase sleep time if the mixer is ready for two consecutive times to avoid
     // that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -2802,7 +2944,13 @@
             sleepTime = idleSleepTime;
         }
     } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
-        memset (mMixBuffer, 0, mixBufferSize);
+        // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared
+        // before effects processing or output.
+        if (mMixerBufferValid) {
+            memset(mMixerBuffer, 0, mMixerBufferSize);
+        } else {
+            memset(mSinkBuffer, 0, mSinkBufferSize);
+        }
         sleepTime = 0;
         ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
                 "anticipated start");
@@ -2849,6 +2997,9 @@
         state = sq->begin();
     }
 
+    mMixerBufferValid = false;  // mMixerBuffer has no valid data until appropriate tracks found.
+    mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
+
     for (size_t i=0 ; i<count ; i++) {
         const sp<Track> t = mActiveTracks[i].promote();
         if (t == 0) {
@@ -2967,7 +3118,7 @@
                 break;
             case TrackBase::IDLE:
             default:
-                LOG_FATAL("unexpected track state %d", track->mState);
+                LOG_ALWAYS_FATAL("unexpected track state %d", track->mState);
             }
 
             if (isActive) {
@@ -2998,7 +3149,7 @@
                     // because we're about to decrement the last sp<> on those tracks.
                     block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
                 } else {
-                    LOG_FATAL("fast track %d should have been active", j);
+                    LOG_ALWAYS_FATAL("fast track %d should have been active", j);
                 }
                 tracksToRemove->add(track);
                 // Avoids a misleading display in dumpsys
@@ -3027,12 +3178,14 @@
             // +1 for rounding and +1 for additional sample needed for interpolation
             desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
             // add frames already consumed but not yet released by the resampler
-            // because cblk->framesReady() will include these frames
+            // because mAudioTrackServerProxy->framesReady() will include these frames
             desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+#if 0
             // the minimum track buffer size is normally twice the number of frames necessary
             // to fill one buffer and the resampler should not leave more than one buffer worth
             // of unreleased frames after each pass, but just in case...
             ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
+#endif
         }
         uint32_t minFrames = 1;
         if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
@@ -3048,10 +3201,14 @@
 
             mixedTracks++;
 
-            // track->mainBuffer() != mMixBuffer means there is an effect chain
-            // connected to the track
+            // track->mainBuffer() != mSinkBuffer or mMixerBuffer means
+            // there is an effect chain connected to the track
             chain.clear();
-            if (track->mainBuffer() != mMixBuffer) {
+            if (track->mainBuffer() != mSinkBuffer &&
+                    track->mainBuffer() != mMixerBuffer) {
+                if (mEffectBufferEnabled) {
+                    mEffectBufferValid = true; // Later can set directly.
+                }
                 chain = getEffectChain_l(track->sessionId());
                 // Delegate volume control to effect in track effect chain if needed
                 if (chain != 0) {
@@ -3177,10 +3334,41 @@
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(uintptr_t)reqSampleRate);
-            mAudioMixer->setParameter(
-                name,
-                AudioMixer::TRACK,
-                AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
+            /*
+             * Select the appropriate output buffer for the track.
+             *
+             * Tracks with effects go into their own effects chain buffer
+             * and from there into either mEffectBuffer or mSinkBuffer.
+             *
+             * Other tracks can use mMixerBuffer for higher precision
+             * channel accumulation.  If this buffer is enabled
+             * (mMixerBufferEnabled true), then selected tracks will accumulate
+             * into it.
+             *
+             */
+            if (mMixerBufferEnabled
+                    && (track->mainBuffer() == mSinkBuffer
+                            || track->mainBuffer() == mMixerBuffer)) {
+                mAudioMixer->setParameter(
+                        name,
+                        AudioMixer::TRACK,
+                        AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
+                mAudioMixer->setParameter(
+                        name,
+                        AudioMixer::TRACK,
+                        AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
+                // TODO: override track->mainBuffer()?
+                mMixerBufferValid = true;
+            } else {
+                mAudioMixer->setParameter(
+                        name,
+                        AudioMixer::TRACK,
+                        AudioMixer::MIXER_FORMAT, (void *)AUDIO_FORMAT_PCM_16_BIT);
+                mAudioMixer->setParameter(
+                        name,
+                        AudioMixer::TRACK,
+                        AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
+            }
             mAudioMixer->setParameter(
                 name,
                 AudioMixer::TRACK,
@@ -3294,13 +3482,30 @@
     // remove all the tracks that need to be...
     removeTracks_l(*tracksToRemove);
 
-    // mix buffer must be cleared if all tracks are connected to an
-    // effect chain as in this case the mixer will not write to
-    // mix buffer and track effects will accumulate into it
+    // sink or mix buffer must be cleared if all tracks are connected to an
+    // effect chain as in this case the mixer will not write to the sink or mix buffer
+    // and track effects will accumulate into it
     if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
             (mixedTracks == 0 && fastTracks > 0))) {
         // FIXME as a performance optimization, should remember previous zero status
-        memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+        if (mMixerBufferValid) {
+            memset(mMixerBuffer, 0, mMixerBufferSize);
+            // TODO: In testing, mSinkBuffer below need not be cleared because
+            // the PlaybackThread::threadLoop() copies mMixerBuffer into mSinkBuffer
+            // after mixing.
+            //
+            // To enforce this guarantee:
+            // ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+            // (mixedTracks == 0 && fastTracks > 0))
+            // must imply MIXER_TRACKS_READY.
+            // Later, we may clear buffers regardless, and skip much of this logic.
+        }
+        // TODO - either mEffectBuffer or mSinkBuffer needs to be cleared.
+        if (mEffectBufferValid) {
+            memset(mEffectBuffer, 0, mEffectBufferSize);
+        }
+        // FIXME as a performance optimization, should remember previous zero status
+        memset(mSinkBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
     }
 
     // if any fast tracks, then status is ready
@@ -3358,6 +3563,7 @@
             if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
                 status = BAD_VALUE;
             } else {
+                // no need to save value, since it's constant
                 reconfig = true;
             }
         }
@@ -3365,6 +3571,7 @@
             if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
                 status = BAD_VALUE;
             } else {
+                // no need to save value, since it's constant
                 reconfig = true;
             }
         }
@@ -3423,7 +3630,7 @@
                                                        keyValuePair.string());
             }
             if (status == NO_ERROR && reconfig) {
-                readOutputParameters();
+                readOutputParameters_l();
                 delete mAudioMixer;
                 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
                 for (size_t i = 0; i < mTracks.size() ; i++) {
@@ -3468,9 +3675,7 @@
 
     PlaybackThread::dumpInternals(fd, args);
 
-    snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
-    result.append(buffer);
-    write(fd, result.string(), result.size());
+    fdprintf(fd, "  AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
 
     // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
     const FastMixerDumpState copy(mFastMixerDumpState);
@@ -3688,7 +3893,7 @@
 void AudioFlinger::DirectOutputThread::threadLoop_mix()
 {
     size_t frameCount = mFrameCount;
-    int8_t *curBuf = (int8_t *)mMixBuffer;
+    int8_t *curBuf = (int8_t *)mSinkBuffer;
     // output audio to hardware
     while (frameCount) {
         AudioBufferProvider::Buffer buffer;
@@ -3703,7 +3908,7 @@
         curBuf += buffer.frameCount * mFrameSize;
         mActiveTrack->releaseBuffer(&buffer);
     }
-    mCurrentWriteLength = curBuf - (int8_t *)mMixBuffer;
+    mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer;
     sleepTime = 0;
     standbyTime = systemTime() + standbyDelay;
     mActiveTrack.clear();
@@ -3718,20 +3923,20 @@
             sleepTime = idleSleepTime;
         }
     } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
-        memset(mMixBuffer, 0, mFrameCount * mFrameSize);
+        memset(mSinkBuffer, 0, mFrameCount * mFrameSize);
         sleepTime = 0;
     }
 }
 
 // getTrackName_l() must be called with ThreadBase::mLock held
-int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask,
-        int sessionId)
+int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused,
+        int sessionId __unused)
 {
     return 0;
 }
 
 // deleteTrackName_l() must be called with ThreadBase::mLock held
-void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name)
+void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name __unused)
 {
 }
 
@@ -3746,6 +3951,16 @@
         AudioParameter param = AudioParameter(keyValuePair);
         int value;
 
+        if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+            // forward device change to effects that have requested to be
+            // aware of attached audio device.
+            if (value != AUDIO_DEVICE_NONE) {
+                mOutDevice = value;
+                for (size_t i = 0; i < mEffectChains.size(); i++) {
+                    mEffectChains[i]->setDevice_l(mOutDevice);
+                }
+            }
+        }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
             // size depends on frame count and correct behavior would not be garantied
@@ -3767,7 +3982,7 @@
                                                        keyValuePair.string());
             }
             if (status == NO_ERROR && reconfig) {
-                readOutputParameters();
+                readOutputParameters_l();
                 sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
             }
         }
@@ -3984,6 +4199,17 @@
         sp<Track> l = mLatestActiveTrack.promote();
         bool last = l.get() == track;
 
+        if (track->isInvalid()) {
+            ALOGW("An invalidated track shouldn't be in active list");
+            tracksToRemove->add(track);
+            continue;
+        }
+
+        if (track->mState == TrackBase::IDLE) {
+            ALOGW("An idle track shouldn't be in active list");
+            continue;
+        }
+
         if (track->isPausing()) {
             track->setPaused();
             if (last) {
@@ -4002,32 +4228,39 @@
                 mBytesRemaining = 0;    // stop writing
             }
             tracksToRemove->add(track);
-        } else if (track->framesReady() && track->isReady() &&
+        } else if (track->isFlushPending()) {
+            track->flushAck();
+            if (last) {
+                mFlushPending = true;
+            }
+        } else if (track->isResumePending()){
+            track->resumeAck();
+            if (last) {
+                if (mPausedBytesRemaining) {
+                    // Need to continue write that was interrupted
+                    mCurrentWriteLength = mPausedWriteLength;
+                    mBytesRemaining = mPausedBytesRemaining;
+                    mPausedBytesRemaining = 0;
+                }
+                if (mHwPaused) {
+                    doHwResume = true;
+                    mHwPaused = false;
+                    // threadLoop_mix() will handle the case that we need to
+                    // resume an interrupted write
+                }
+                // enable write to audio HAL
+                sleepTime = 0;
+
+                // Do not handle new data in this iteration even if track->framesReady()
+                mixerStatus = MIXER_TRACKS_ENABLED;
+            }
+        } else if (track->framesReady() && track->isReady() &&
                 !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) {
             ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->mServer);
             if (track->mFillingUpStatus == Track::FS_FILLED) {
                 track->mFillingUpStatus = Track::FS_ACTIVE;
                 // make sure processVolume_l() will apply new volume even if 0
                 mLeftVolFloat = mRightVolFloat = -1.0;
-                if (track->mState == TrackBase::RESUMING) {
-                    track->mState = TrackBase::ACTIVE;
-                    if (last) {
-                        if (mPausedBytesRemaining) {
-                            // Need to continue write that was interrupted
-                            mCurrentWriteLength = mPausedWriteLength;
-                            mBytesRemaining = mPausedBytesRemaining;
-                            mPausedBytesRemaining = 0;
-                        }
-                        if (mHwPaused) {
-                            doHwResume = true;
-                            mHwPaused = false;
-                            // threadLoop_mix() will handle the case that we need to
-                            // resume an interrupted write
-                        }
-                        // enable write to audio HAL
-                        sleepTime = 0;
-                    }
-                }
             }
 
             if (last) {
@@ -4051,7 +4284,6 @@
                         // seek when resuming.
                         if (previousTrack->sessionId() != track->sessionId()) {
                             previousTrack->invalidate();
-                            mFlushPending = true;
                         }
                     }
                 }
@@ -4127,9 +4359,6 @@
     // if resume is received before pause is executed.
     if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
         mOutput->stream->pause(mOutput->stream);
-        if (!doHwPause) {
-            doHwResume = true;
-        }
     }
     if (mFlushPending) {
         flushHw_l();
@@ -4145,11 +4374,6 @@
     return mixerStatus;
 }
 
-void AudioFlinger::OffloadThread::flushOutput_l()
-{
-    mFlushPending = true;
-}
-
 // must be called with thread mutex locked
 bool AudioFlinger::OffloadThread::waitingAsyncCallback_l()
 {
@@ -4164,15 +4388,15 @@
 // must be called with thread mutex locked
 bool AudioFlinger::OffloadThread::shouldStandby_l()
 {
-    bool TrackPaused = false;
+    bool trackPaused = false;
 
     // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
-        TrackPaused = mTracks[mTracks.size() - 1]->isPaused();
+        trackPaused = mTracks[mTracks.size() - 1]->isPaused();
     }
 
-    return !mStandby && !TrackPaused;
+    return !mStandby && !trackPaused;
 }
 
 
@@ -4190,6 +4414,8 @@
     mBytesRemaining = 0;
     mPausedWriteLength = 0;
     mPausedBytesRemaining = 0;
+    mHwPaused = false;
+
     if (mUseAsyncWrite) {
         // discard any pending drain or write ack by incrementing sequence
         mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
@@ -4200,6 +4426,18 @@
     }
 }
 
+void AudioFlinger::OffloadThread::onAddNewTrack_l()
+{
+    sp<Track> previousTrack = mPreviousTrack.promote();
+    sp<Track> latestTrack = mLatestActiveTrack.promote();
+
+    if (previousTrack != 0 && latestTrack != 0 &&
+        (previousTrack->sessionId() != latestTrack->sessionId())) {
+        mFlushPending = true;
+    }
+    PlaybackThread::onAddNewTrack_l();
+}
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
@@ -4224,11 +4462,11 @@
     if (outputsReady(outputTracks)) {
         mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
     } else {
-        memset(mMixBuffer, 0, mixBufferSize);
+        memset(mSinkBuffer, 0, mSinkBufferSize);
     }
     sleepTime = 0;
     writeFrames = mNormalFrameCount;
-    mCurrentWriteLength = mixBufferSize;
+    mCurrentWriteLength = mSinkBufferSize;
     standbyTime = systemTime() + standbyDelay;
 }
 
@@ -4243,7 +4481,7 @@
     } else if (mBytesWritten != 0) {
         if (mMixerStatus == MIXER_TRACKS_ENABLED) {
             writeFrames = mNormalFrameCount;
-            memset(mMixBuffer, 0, mixBufferSize);
+            memset(mSinkBuffer, 0, mSinkBufferSize);
         } else {
             // flush remaining overflow buffers in output tracks
             writeFrames = 0;
@@ -4255,10 +4493,18 @@
 ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
     for (size_t i = 0; i < outputTracks.size(); i++) {
-        outputTracks[i]->write(mMixBuffer, writeFrames);
+        // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT
+        // for delivery downstream as needed. This in-place conversion is safe as
+        // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format
+        // (AUDIO_FORMAT_PCM_8_BIT is not allowed here).
+        if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+            memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT,
+                    mSinkBuffer, mFormat, writeFrames * mChannelCount);
+        }
+        outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames);
     }
     mStandby = false;
-    return (ssize_t)mixBufferSize;
+    return (ssize_t)mSinkBufferSize;
 }
 
 void AudioFlinger::DuplicatingThread::threadLoop_standby()
@@ -4284,10 +4530,16 @@
     Mutex::Autolock _l(mLock);
     // FIXME explain this formula
     size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
+    // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat
+    // due to current usage case and restrictions on the AudioBufferProvider.
+    // Actual buffer conversion is done in threadLoop_write().
+    //
+    // TODO: This may change in the future, depending on multichannel
+    // (and non int16_t*) support on AF::PlaybackThread::OutputTrack
     OutputTrack *outputTrack = new OutputTrack(thread,
                                             this,
                                             mSampleRate,
-                                            mFormat,
+                                            AUDIO_FORMAT_PCM_16_BIT,
                                             mChannelMask,
                                             frameCount,
                                             IPCThreadState::self()->getCallingUid());
@@ -4369,8 +4621,6 @@
 
 AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
                                          AudioStreamIn *input,
-                                         uint32_t sampleRate,
-                                         audio_channel_mask_t channelMask,
                                          audio_io_handle_t id,
                                          audio_devices_t outDevice,
                                          audio_devices_t inDevice
@@ -4379,27 +4629,24 @@
 #endif
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
-    mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
-    // mRsmpInIndex and mBufferSize set by readInputParameters()
-    mReqChannelCount(popcount(channelMask)),
-    mReqSampleRate(sampleRate)
-    // mBytesRead is only meaningful while active, and so is cleared in start()
-    // (but might be better to also clear here for dump?)
+    mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
+    // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
+    mRsmpInRear(0)
 #ifdef TEE_SINK
     , mTeeSink(teeSink)
 #endif
 {
     snprintf(mName, kNameLength, "AudioIn_%X", id);
+    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
 
-    readInputParameters();
+    readInputParameters_l();
 }
 
 
 AudioFlinger::RecordThread::~RecordThread()
 {
+    mAudioFlinger->unregisterWriter(mNBLogWriter);
     delete[] mRsmpInBuffer;
-    delete mResampler;
-    delete[] mRsmpOutBuffer;
 }
 
 void AudioFlinger::RecordThread::onFirstRef()
@@ -4407,230 +4654,393 @@
     run(mName, PRIORITY_URGENT_AUDIO);
 }
 
-status_t AudioFlinger::RecordThread::readyToRun()
-{
-    status_t status = initCheck();
-    ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this);
-    return status;
-}
-
 bool AudioFlinger::RecordThread::threadLoop()
 {
-    AudioBufferProvider::Buffer buffer;
-    sp<RecordTrack> activeTrack;
-    Vector< sp<EffectChain> > effectChains;
-
     nsecs_t lastWarning = 0;
 
     inputStandBy();
+
+reacquire_wakelock:
+    sp<RecordTrack> activeTrack;
+    int activeTracksGen;
     {
         Mutex::Autolock _l(mLock);
-        activeTrack = mActiveTrack;
-        acquireWakeLock_l(activeTrack != 0 ? activeTrack->uid() : -1);
+        size_t size = mActiveTracks.size();
+        activeTracksGen = mActiveTracksGen;
+        if (size > 0) {
+            // FIXME an arbitrary choice
+            activeTrack = mActiveTracks[0];
+            acquireWakeLock_l(activeTrack->uid());
+            if (size > 1) {
+                SortedVector<int> tmp;
+                for (size_t i = 0; i < size; i++) {
+                    tmp.add(mActiveTracks[i]->uid());
+                }
+                updateWakeLockUids_l(tmp);
+            }
+        } else {
+            acquireWakeLock_l(-1);
+        }
     }
 
-    // used to verify we've read at least once before evaluating how many bytes were read
-    bool readOnce = false;
+    // used to request a deferred sleep, to be executed later while mutex is unlocked
+    uint32_t sleepUs = 0;
 
-    // start recording
-    while (!exitPending()) {
+    // loop while there is work to do
+    for (;;) {
+        Vector< sp<EffectChain> > effectChains;
 
-        processConfigEvents();
+        // sleep with mutex unlocked
+        if (sleepUs > 0) {
+            usleep(sleepUs);
+            sleepUs = 0;
+        }
+
+        // activeTracks accumulates a copy of a subset of mActiveTracks
+        Vector< sp<RecordTrack> > activeTracks;
 
         { // scope for mLock
             Mutex::Autolock _l(mLock);
-            checkForNewParameters_l();
-            if (mActiveTrack != 0 && activeTrack != mActiveTrack) {
-                SortedVector<int> tmp;
-                tmp.add(mActiveTrack->uid());
-                updateWakeLockUids_l(tmp);
+
+            processConfigEvents_l();
+            // return value 'reconfig' is currently unused
+            bool reconfig = checkForNewParameters_l();
+
+            // check exitPending here because processConfigEvents_l() and
+            // checkForNewParameters_l() can temporarily release mLock
+            if (exitPending()) {
+                break;
             }
-            activeTrack = mActiveTrack;
-            if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
-                standby();
 
-                if (exitPending()) {
-                    break;
-                }
-
+            // if no active track(s), then standby and release wakelock
+            size_t size = mActiveTracks.size();
+            if (size == 0) {
+                standbyIfNotAlreadyInStandby();
+                // exitPending() can't become true here
                 releaseWakeLock_l();
                 ALOGV("RecordThread: loop stopping");
                 // go to sleep
                 mWaitWorkCV.wait(mLock);
                 ALOGV("RecordThread: loop starting");
-                acquireWakeLock_l(mActiveTrack != 0 ? mActiveTrack->uid() : -1);
+                goto reacquire_wakelock;
+            }
+
+            if (mActiveTracksGen != activeTracksGen) {
+                activeTracksGen = mActiveTracksGen;
+                SortedVector<int> tmp;
+                for (size_t i = 0; i < size; i++) {
+                    tmp.add(mActiveTracks[i]->uid());
+                }
+                updateWakeLockUids_l(tmp);
+            }
+
+            bool doBroadcast = false;
+            for (size_t i = 0; i < size; ) {
+
+                activeTrack = mActiveTracks[i];
+                if (activeTrack->isTerminated()) {
+                    removeTrack_l(activeTrack);
+                    mActiveTracks.remove(activeTrack);
+                    mActiveTracksGen++;
+                    size--;
+                    continue;
+                }
+
+                TrackBase::track_state activeTrackState = activeTrack->mState;
+                switch (activeTrackState) {
+
+                case TrackBase::PAUSING:
+                    mActiveTracks.remove(activeTrack);
+                    mActiveTracksGen++;
+                    doBroadcast = true;
+                    size--;
+                    continue;
+
+                case TrackBase::STARTING_1:
+                    sleepUs = 10000;
+                    i++;
+                    continue;
+
+                case TrackBase::STARTING_2:
+                    doBroadcast = true;
+                    mStandby = false;
+                    activeTrack->mState = TrackBase::ACTIVE;
+                    break;
+
+                case TrackBase::ACTIVE:
+                    break;
+
+                case TrackBase::IDLE:
+                    i++;
+                    continue;
+
+                default:
+                    LOG_ALWAYS_FATAL("Unexpected activeTrackState %d", activeTrackState);
+                }
+
+                activeTracks.add(activeTrack);
+                i++;
+
+            }
+            if (doBroadcast) {
+                mStartStopCond.broadcast();
+            }
+
+            // sleep if there are no active tracks to process
+            if (activeTracks.size() == 0) {
+                if (sleepUs == 0) {
+                    sleepUs = kRecordThreadSleepUs;
+                }
                 continue;
             }
-            if (mActiveTrack != 0) {
-                if (mActiveTrack->isTerminated()) {
-                    removeTrack_l(mActiveTrack);
-                    mActiveTrack.clear();
-                } else if (mActiveTrack->mState == TrackBase::PAUSING) {
-                    standby();
-                    mActiveTrack.clear();
-                    mStartStopCond.broadcast();
-                } else if (mActiveTrack->mState == TrackBase::RESUMING) {
-                    if (mReqChannelCount != mActiveTrack->channelCount()) {
-                        mActiveTrack.clear();
-                        mStartStopCond.broadcast();
-                    } else if (readOnce) {
-                        // record start succeeds only if first read from audio input
-                        // succeeds
-                        if (mBytesRead >= 0) {
-                            mActiveTrack->mState = TrackBase::ACTIVE;
-                        } else {
-                            mActiveTrack.clear();
-                        }
-                        mStartStopCond.broadcast();
-                    }
-                    mStandby = false;
-                }
-            }
+            sleepUs = 0;
 
             lockEffectChains_l(effectChains);
         }
 
-        if (mActiveTrack != 0) {
-            if (mActiveTrack->mState != TrackBase::ACTIVE &&
-                mActiveTrack->mState != TrackBase::RESUMING) {
-                unlockEffectChains(effectChains);
-                usleep(kRecordThreadSleepUs);
-                continue;
-            }
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
-            }
+        // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
 
-            buffer.frameCount = mFrameCount;
-            status_t status = mActiveTrack->getNextBuffer(&buffer);
-            if (status == NO_ERROR) {
-                readOnce = true;
-                size_t framesOut = buffer.frameCount;
-                if (mResampler == NULL) {
+        size_t size = effectChains.size();
+        for (size_t i = 0; i < size; i++) {
+            // thread mutex is not locked, but effect chain is locked
+            effectChains[i]->process_l();
+        }
+
+        // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
+        // Only the client(s) that are too slow will overrun. But if even the fastest client is too
+        // slow, then this RecordThread will overrun by not calling HAL read often enough.
+        // If destination is non-contiguous, first read past the nominal end of buffer, then
+        // copy to the right place.  Permitted because mRsmpInBuffer was over-allocated.
+
+        int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
+        ssize_t bytesRead = mInput->stream->read(mInput->stream,
+                &mRsmpInBuffer[rear * mChannelCount], mBufferSize);
+        if (bytesRead <= 0) {
+            ALOGE("read failed: bytesRead=%d < %u", bytesRead, mBufferSize);
+            // Force input into standby so that it tries to recover at next read attempt
+            inputStandBy();
+            sleepUs = kRecordThreadSleepUs;
+            continue;
+        }
+        ALOG_ASSERT((size_t) bytesRead <= mBufferSize);
+        size_t framesRead = bytesRead / mFrameSize;
+        ALOG_ASSERT(framesRead > 0);
+        if (mTeeSink != 0) {
+            (void) mTeeSink->write(&mRsmpInBuffer[rear * mChannelCount], framesRead);
+        }
+        // If destination is non-contiguous, we now correct for reading past end of buffer.
+        size_t part1 = mRsmpInFramesP2 - rear;
+        if (framesRead > part1) {
+            memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount],
+                    (framesRead - part1) * mFrameSize);
+        }
+        rear = mRsmpInRear += framesRead;
+
+        size = activeTracks.size();
+        // loop over each active track
+        for (size_t i = 0; i < size; i++) {
+            activeTrack = activeTracks[i];
+
+            enum {
+                OVERRUN_UNKNOWN,
+                OVERRUN_TRUE,
+                OVERRUN_FALSE
+            } overrun = OVERRUN_UNKNOWN;
+
+            // loop over getNextBuffer to handle circular sink
+            for (;;) {
+
+                activeTrack->mSink.frameCount = ~0;
+                status_t status = activeTrack->getNextBuffer(&activeTrack->mSink);
+                size_t framesOut = activeTrack->mSink.frameCount;
+                LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
+
+                int32_t front = activeTrack->mRsmpInFront;
+                ssize_t filled = rear - front;
+                size_t framesIn;
+
+                if (filled < 0) {
+                    // should not happen, but treat like a massive overrun and re-sync
+                    framesIn = 0;
+                    activeTrack->mRsmpInFront = rear;
+                    overrun = OVERRUN_TRUE;
+                } else if ((size_t) filled <= mRsmpInFrames) {
+                    framesIn = (size_t) filled;
+                } else {
+                    // client is not keeping up with server, but give it latest data
+                    framesIn = mRsmpInFrames;
+                    activeTrack->mRsmpInFront = front = rear - framesIn;
+                    overrun = OVERRUN_TRUE;
+                }
+
+                if (framesOut == 0 || framesIn == 0) {
+                    break;
+                }
+
+                if (activeTrack->mResampler == NULL) {
                     // no resampling
-                    while (framesOut) {
-                        size_t framesIn = mFrameCount - mRsmpInIndex;
-                        if (framesIn) {
-                            int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
-                            int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
-                                    mActiveTrack->mFrameSize;
-                            if (framesIn > framesOut)
-                                framesIn = framesOut;
-                            mRsmpInIndex += framesIn;
-                            framesOut -= framesIn;
-                            if (mChannelCount == mReqChannelCount) {
-                                memcpy(dst, src, framesIn * mFrameSize);
-                            } else {
-                                if (mChannelCount == 1) {
-                                    upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
-                                            (int16_t *)src, framesIn);
-                                } else {
-                                    downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
-                                            (int16_t *)src, framesIn);
-                                }
-                            }
-                        }
-                        if (framesOut && mFrameCount == mRsmpInIndex) {
-                            void *readInto;
-                            if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
-                                readInto = buffer.raw;
-                                framesOut = 0;
-                            } else {
-                                readInto = mRsmpInBuffer;
-                                mRsmpInIndex = 0;
-                            }
-                            mBytesRead = mInput->stream->read(mInput->stream, readInto,
-                                    mBufferSize);
-                            if (mBytesRead <= 0) {
-                                if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
-                                {
-                                    ALOGE("Error reading audio input");
-                                    // Force input into standby so that it tries to
-                                    // recover at next read attempt
-                                    inputStandBy();
-                                    usleep(kRecordThreadSleepUs);
-                                }
-                                mRsmpInIndex = mFrameCount;
-                                framesOut = 0;
-                                buffer.frameCount = 0;
-                            }
-#ifdef TEE_SINK
-                            else if (mTeeSink != 0) {
-                                (void) mTeeSink->write(readInto,
-                                        mBytesRead >> Format_frameBitShift(mTeeSink->format()));
-                            }
-#endif
-                        }
+                    if (framesIn > framesOut) {
+                        framesIn = framesOut;
+                    } else {
+                        framesOut = framesIn;
                     }
+                    int8_t *dst = activeTrack->mSink.i8;
+                    while (framesIn > 0) {
+                        front &= mRsmpInFramesP2 - 1;
+                        size_t part1 = mRsmpInFramesP2 - front;
+                        if (part1 > framesIn) {
+                            part1 = framesIn;
+                        }
+                        int8_t *src = (int8_t *)mRsmpInBuffer + (front * mFrameSize);
+                        if (mChannelCount == activeTrack->mChannelCount) {
+                            memcpy(dst, src, part1 * mFrameSize);
+                        } else if (mChannelCount == 1) {
+                            upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (int16_t *)src,
+                                    part1);
+                        } else {
+                            downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (int16_t *)src,
+                                    part1);
+                        }
+                        dst += part1 * activeTrack->mFrameSize;
+                        front += part1;
+                        framesIn -= part1;
+                    }
+                    activeTrack->mRsmpInFront += framesOut;
+
                 } else {
                     // resampling
+                    // FIXME framesInNeeded should really be part of resampler API, and should
+                    //       depend on the SRC ratio
+                    //       to keep mRsmpInBuffer full so resampler always has sufficient input
+                    size_t framesInNeeded;
+                    // FIXME only re-calculate when it changes, and optimize for common ratios
+                    double inOverOut = (double) mSampleRate / activeTrack->mSampleRate;
+                    double outOverIn = (double) activeTrack->mSampleRate / mSampleRate;
+                    framesInNeeded = ceil(framesOut * inOverOut) + 1;
+                    ALOGV("need %u frames in to produce %u out given in/out ratio of %.4g",
+                                framesInNeeded, framesOut, inOverOut);
+                    // Although we theoretically have framesIn in circular buffer, some of those are
+                    // unreleased frames, and thus must be discounted for purpose of budgeting.
+                    size_t unreleased = activeTrack->mRsmpInUnrel;
+                    framesIn = framesIn > unreleased ? framesIn - unreleased : 0;
+                    if (framesIn < framesInNeeded) {
+                        ALOGV("not enough to resample: have %u frames in but need %u in to "
+                                "produce %u out given in/out ratio of %.4g",
+                                framesIn, framesInNeeded, framesOut, inOverOut);
+                        size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * outOverIn) : 0;
+                        LOG_ALWAYS_FATAL_IF(newFramesOut >= framesOut);
+                        if (newFramesOut == 0) {
+                            break;
+                        }
+                        framesInNeeded = ceil(newFramesOut * inOverOut) + 1;
+                        ALOGV("now need %u frames in to produce %u out given out/in ratio of %.4g",
+                                framesInNeeded, newFramesOut, outOverIn);
+                        LOG_ALWAYS_FATAL_IF(framesIn < framesInNeeded);
+                        ALOGV("success 2: have %u frames in and need %u in to produce %u out "
+                              "given in/out ratio of %.4g",
+                              framesIn, framesInNeeded, newFramesOut, inOverOut);
+                        framesOut = newFramesOut;
+                    } else {
+                        ALOGV("success 1: have %u in and need %u in to produce %u out "
+                            "given in/out ratio of %.4g",
+                            framesIn, framesInNeeded, framesOut, inOverOut);
+                    }
+
+                    // reallocate mRsmpOutBuffer as needed; we will grow but never shrink
+                    if (activeTrack->mRsmpOutFrameCount < framesOut) {
+                        // FIXME why does each track need its own mRsmpOutBuffer? can't they share?
+                        delete[] activeTrack->mRsmpOutBuffer;
+                        // resampler always outputs stereo
+                        activeTrack->mRsmpOutBuffer = new int32_t[framesOut * FCC_2];
+                        activeTrack->mRsmpOutFrameCount = framesOut;
+                    }
 
                     // resampler accumulates, but we only have one source track
-                    memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
-                    // alter output frame count as if we were expecting stereo samples
-                    if (mChannelCount == 1 && mReqChannelCount == 1) {
-                        framesOut >>= 1;
-                    }
-                    mResampler->resample(mRsmpOutBuffer, framesOut,
-                            this /* AudioBufferProvider* */);
+                    memset(activeTrack->mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
+                    activeTrack->mResampler->resample(activeTrack->mRsmpOutBuffer, framesOut,
+                            // FIXME how about having activeTrack implement this interface itself?
+                            activeTrack->mResamplerBufferProvider
+                            /*this*/ /* AudioBufferProvider* */);
                     // ditherAndClamp() works as long as all buffers returned by
-                    // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
-                    if (mChannelCount == 2 && mReqChannelCount == 1) {
-                        // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
-                        ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
+                    // activeTrack->getNextBuffer() are 32 bit aligned which should be always true.
+                    if (activeTrack->mChannelCount == 1) {
+                        // temporarily type pun mRsmpOutBuffer from Q4.27 to int16_t
+                        ditherAndClamp(activeTrack->mRsmpOutBuffer, activeTrack->mRsmpOutBuffer,
+                                framesOut);
                         // the resampler always outputs stereo samples:
                         // do post stereo to mono conversion
-                        downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
-                                framesOut);
+                        downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16,
+                                (int16_t *)activeTrack->mRsmpOutBuffer, framesOut);
                     } else {
-                        ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
+                        ditherAndClamp((int32_t *)activeTrack->mSink.raw,
+                                activeTrack->mRsmpOutBuffer, framesOut);
                     }
                     // now done with mRsmpOutBuffer
 
                 }
-                if (mFramestoDrop == 0) {
-                    mActiveTrack->releaseBuffer(&buffer);
+
+                if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
+                    overrun = OVERRUN_FALSE;
+                }
+
+                if (activeTrack->mFramesToDrop == 0) {
+                    if (framesOut > 0) {
+                        activeTrack->mSink.frameCount = framesOut;
+                        activeTrack->releaseBuffer(&activeTrack->mSink);
+                    }
                 } else {
-                    if (mFramestoDrop > 0) {
-                        mFramestoDrop -= buffer.frameCount;
-                        if (mFramestoDrop <= 0) {
-                            clearSyncStartEvent();
+                    // FIXME could do a partial drop of framesOut
+                    if (activeTrack->mFramesToDrop > 0) {
+                        activeTrack->mFramesToDrop -= framesOut;
+                        if (activeTrack->mFramesToDrop <= 0) {
+                            activeTrack->clearSyncStartEvent();
                         }
                     } else {
-                        mFramestoDrop += buffer.frameCount;
-                        if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
-                                mSyncStartEvent->isCancelled()) {
+                        activeTrack->mFramesToDrop += framesOut;
+                        if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 ||
+                                activeTrack->mSyncStartEvent->isCancelled()) {
                             ALOGW("Synced record %s, session %d, trigger session %d",
-                                  (mFramestoDrop >= 0) ? "timed out" : "cancelled",
-                                  mActiveTrack->sessionId(),
-                                  (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
-                            clearSyncStartEvent();
+                                  (activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
+                                  activeTrack->sessionId(),
+                                  (activeTrack->mSyncStartEvent != 0) ?
+                                          activeTrack->mSyncStartEvent->triggerSession() : 0);
+                            activeTrack->clearSyncStartEvent();
                         }
                     }
                 }
-                mActiveTrack->clearOverflow();
+
+                if (framesOut == 0) {
+                    break;
+                }
             }
-            // client isn't retrieving buffers fast enough
-            else {
-                if (!mActiveTrack->setOverflow()) {
+
+            switch (overrun) {
+            case OVERRUN_TRUE:
+                // client isn't retrieving buffers fast enough
+                if (!activeTrack->setOverflow()) {
                     nsecs_t now = systemTime();
+                    // FIXME should lastWarning be per track?
                     if ((now - lastWarning) > kWarningThrottleNs) {
                         ALOGW("RecordThread: buffer overflow");
                         lastWarning = now;
                     }
                 }
-                // Release the processor for a while before asking for a new buffer.
-                // This will give the application more chance to read from the buffer and
-                // clear the overflow.
-                usleep(kRecordThreadSleepUs);
+                break;
+            case OVERRUN_FALSE:
+                activeTrack->clearOverflow();
+                break;
+            case OVERRUN_UNKNOWN:
+                break;
             }
+
         }
+
         // enable changes in effect chain
         unlockEffectChains(effectChains);
-        effectChains.clear();
+        // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
     }
 
-    standby();
+    standbyIfNotAlreadyInStandby();
 
     {
         Mutex::Autolock _l(mLock);
@@ -4638,7 +5048,8 @@
             sp<RecordTrack> track = mTracks[i];
             track->invalidate();
         }
-        mActiveTrack.clear();
+        mActiveTracks.clear();
+        mActiveTracksGen++;
         mStartStopCond.broadcast();
     }
 
@@ -4648,7 +5059,7 @@
     return false;
 }
 
-void AudioFlinger::RecordThread::standby()
+void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby()
 {
     if (!mStandby) {
         inputStandBy();
@@ -4661,26 +5072,23 @@
     mInput->stream->common.standby(&mInput->stream->common);
 }
 
-sp<AudioFlinger::RecordThread::RecordTrack>  AudioFlinger::RecordThread::createRecordTrack_l(
+// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
+sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
         const sp<AudioFlinger::Client>& client,
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
-        size_t frameCount,
+        size_t *pFrameCount,
         int sessionId,
         int uid,
         IAudioFlinger::track_flags_t *flags,
         pid_t tid,
         status_t *status)
 {
+    size_t frameCount = *pFrameCount;
     sp<RecordTrack> track;
     status_t lStatus;
 
-    lStatus = initCheck();
-    if (lStatus != NO_ERROR) {
-        ALOGE("createRecordTrack_l() audio driver not initialized");
-        goto Exit;
-    }
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
@@ -4688,21 +5096,24 @@
             (
                 (tid != -1) &&
                 ((frameCount == 0) ||
+                // FIXME not necessarily true, should be native frame count for native SR!
                 (frameCount >= mFrameCount))
             ) &&
-            // FIXME when record supports non-PCM data, also check for audio_is_linear_pcm(format)
+            // PCM data
+            audio_is_linear_pcm(format) &&
             // mono or stereo
             ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
               (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
             // hardware sample rate
+            // FIXME actually the native hardware sample rate
             (sampleRate == mSampleRate) &&
-            // record thread has an associated fast recorder
-            hasFastRecorder()
-            // FIXME test that RecordThread for this fast track has a capable output HAL
-            // FIXME add a permission test also?
+            // record thread has an associated fast capture
+            hasFastCapture()
+            // fast capture does not require slots
         ) {
-        // if frameCount not specified, then it defaults to fast recorder (HAL) frame count
+        // if frameCount not specified, then it defaults to fast capture (HAL) frame count
         if (frameCount == 0) {
+            // FIXME wrong mFrameCount
             frameCount = mFrameCount * kFastTrackMultiplier;
         }
         ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
@@ -4710,11 +5121,12 @@
       } else {
         ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d "
                 "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
-                "hasFastRecorder=%d tid=%d",
+                "hasFastCapture=%d tid=%d",
                 frameCount, mFrameCount, format,
                 audio_is_linear_pcm(format),
-                channelMask, sampleRate, mSampleRate, hasFastRecorder(), tid);
+                channelMask, sampleRate, mSampleRate, hasFastCapture(), tid);
         *flags &= ~IAudioFlinger::TRACK_FAST;
+        // FIXME It's not clear that we need to enforce this any more, since we have a pipe.
         // For compatibility with AudioRecord calculation, buffer depth is forced
         // to be at least 2 x the record thread frame count and cover audio hardware latency.
         // This is probably too conservative, but legacy application code may depend on it.
@@ -4731,8 +5143,13 @@
         }
       }
     }
+    *pFrameCount = frameCount;
 
-    // FIXME use flags and tid similar to createTrack_l()
+    lStatus = initCheck();
+    if (lStatus != NO_ERROR) {
+        ALOGE("createRecordTrack_l() audio driver not initialized");
+        goto Exit;
+    }
 
     { // scope for mLock
         Mutex::Autolock _l(mLock);
@@ -4740,9 +5157,9 @@
         track = new RecordTrack(this, client, sampleRate,
                       format, channelMask, frameCount, sessionId, uid);
 
-        if (track->getCblk() == 0) {
-            ALOGE("createRecordTrack_l() no control block");
-            lStatus = NO_MEMORY;
+        lStatus = track->initCheck();
+        if (lStatus != NO_ERROR) {
+            ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus);
             // track must be cleared from the caller as the caller has the AF lock
             goto Exit;
         }
@@ -4761,12 +5178,11 @@
             sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
         }
     }
+
     lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return track;
 }
 
@@ -4779,129 +5195,123 @@
     status_t status = NO_ERROR;
 
     if (event == AudioSystem::SYNC_EVENT_NONE) {
-        clearSyncStartEvent();
+        recordTrack->clearSyncStartEvent();
     } else if (event != AudioSystem::SYNC_EVENT_SAME) {
-        mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
+        recordTrack->mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
                                        triggerSession,
                                        recordTrack->sessionId(),
                                        syncStartEventCallback,
-                                       this);
+                                       recordTrack);
         // Sync event can be cancelled by the trigger session if the track is not in a
         // compatible state in which case we start record immediately
-        if (mSyncStartEvent->isCancelled()) {
-            clearSyncStartEvent();
+        if (recordTrack->mSyncStartEvent->isCancelled()) {
+            recordTrack->clearSyncStartEvent();
         } else {
             // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs
-            mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000);
+            recordTrack->mFramesToDrop = -
+                    ((AudioSystem::kSyncRecordStartTimeOutMs * recordTrack->mSampleRate) / 1000);
         }
     }
 
     {
+        // This section is a rendezvous between binder thread executing start() and RecordThread
         AutoMutex lock(mLock);
-        if (mActiveTrack != 0) {
-            if (recordTrack != mActiveTrack.get()) {
-                status = -EBUSY;
-            } else if (mActiveTrack->mState == TrackBase::PAUSING) {
-                mActiveTrack->mState = TrackBase::ACTIVE;
+        if (mActiveTracks.indexOf(recordTrack) >= 0) {
+            if (recordTrack->mState == TrackBase::PAUSING) {
+                ALOGV("active record track PAUSING -> ACTIVE");
+                recordTrack->mState = TrackBase::ACTIVE;
+            } else {
+                ALOGV("active record track state %d", recordTrack->mState);
             }
             return status;
         }
 
-        recordTrack->mState = TrackBase::IDLE;
-        mActiveTrack = recordTrack;
+        // TODO consider other ways of handling this, such as changing the state to TrackBase::STARTING and
+        //      adding the track to mActiveTracks after returning from AudioSystem::startInput(),
+        //      or using a separate command thread
+        recordTrack->mState = TrackBase::STARTING_1;
+        mActiveTracks.add(recordTrack);
+        mActiveTracksGen++;
         mLock.unlock();
         status_t status = AudioSystem::startInput(mId);
         mLock.lock();
+        // FIXME should verify that recordTrack is still in mActiveTracks
         if (status != NO_ERROR) {
-            mActiveTrack.clear();
-            clearSyncStartEvent();
+            mActiveTracks.remove(recordTrack);
+            mActiveTracksGen++;
+            recordTrack->clearSyncStartEvent();
             return status;
         }
-        mRsmpInIndex = mFrameCount;
-        mBytesRead = 0;
-        if (mResampler != NULL) {
-            mResampler->reset();
+        // Catch up with current buffer indices if thread is already running.
+        // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
+        // was initialized to some value closer to the thread's mRsmpInFront, then the track could
+        // see previously buffered data before it called start(), but with greater risk of overrun.
+
+        recordTrack->mRsmpInFront = mRsmpInRear;
+        recordTrack->mRsmpInUnrel = 0;
+        // FIXME why reset?
+        if (recordTrack->mResampler != NULL) {
+            recordTrack->mResampler->reset();
         }
-        mActiveTrack->mState = TrackBase::RESUMING;
+        recordTrack->mState = TrackBase::STARTING_2;
         // signal thread to start
-        ALOGV("Signal record thread");
         mWaitWorkCV.broadcast();
-        // do not wait for mStartStopCond if exiting
-        if (exitPending()) {
-            mActiveTrack.clear();
-            status = INVALID_OPERATION;
-            goto startError;
-        }
-        mStartStopCond.wait(mLock);
-        if (mActiveTrack == 0) {
+        if (mActiveTracks.indexOf(recordTrack) < 0) {
             ALOGV("Record failed to start");
             status = BAD_VALUE;
             goto startError;
         }
-        ALOGV("Record started OK");
         return status;
     }
 
 startError:
     AudioSystem::stopInput(mId);
-    clearSyncStartEvent();
+    recordTrack->clearSyncStartEvent();
+    // FIXME I wonder why we do not reset the state here?
     return status;
 }
 
-void AudioFlinger::RecordThread::clearSyncStartEvent()
-{
-    if (mSyncStartEvent != 0) {
-        mSyncStartEvent->cancel();
-    }
-    mSyncStartEvent.clear();
-    mFramestoDrop = 0;
-}
-
 void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
 {
     sp<SyncEvent> strongEvent = event.promote();
 
     if (strongEvent != 0) {
-        RecordThread *me = (RecordThread *)strongEvent->cookie();
-        me->handleSyncStartEvent(strongEvent);
-    }
-}
-
-void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event)
-{
-    if (event == mSyncStartEvent) {
-        // TODO: use actual buffer filling status instead of 2 buffers when info is available
-        // from audio HAL
-        mFramestoDrop = mFrameCount * 2;
+        sp<RefBase> ptr = strongEvent->cookie().promote();
+        if (ptr != 0) {
+            RecordTrack *recordTrack = (RecordTrack *)ptr.get();
+            recordTrack->handleSyncStartEvent(strongEvent);
+        }
     }
 }
 
 bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
     ALOGV("RecordThread::stop");
     AutoMutex _l(mLock);
-    if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
+    if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
+    // note that threadLoop may still be processing the track at this point [without lock]
     recordTrack->mState = TrackBase::PAUSING;
     // do not wait for mStartStopCond if exiting
     if (exitPending()) {
         return true;
     }
+    // FIXME incorrect usage of wait: no explicit predicate or loop
     mStartStopCond.wait(mLock);
-    // if we have been restarted, recordTrack == mActiveTrack.get() here
-    if (exitPending() || recordTrack != mActiveTrack.get()) {
+    // if we have been restarted, recordTrack is in mActiveTracks here
+    if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) {
         ALOGV("Record stopped OK");
         return true;
     }
     return false;
 }
 
-bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const
+bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const
 {
     return false;
 }
 
-status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event)
+status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event __unused)
 {
 #if 0   // This branch is currently dead code, but is preserved in case it will be needed in future
     if (!isValidSyncEvent(event)) {
@@ -4932,7 +5342,7 @@
     track->terminate();
     track->mState = TrackBase::STOPPED;
     // active tracks are removed by threadLoop()
-    if (mActiveTrack != track) {
+    if (mActiveTracks.indexOf(track) < 0) {
         removeTrack_l(track);
     }
 }
@@ -4952,104 +5362,119 @@
 
 void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
+    fdprintf(fd, "\nInput thread %p:\n", this);
 
-    snprintf(buffer, SIZE, "\nInput thread %p internals\n", this);
-    result.append(buffer);
-
-    if (mActiveTrack != 0) {
-        snprintf(buffer, SIZE, "In index: %zu\n", mRsmpInIndex);
-        result.append(buffer);
-        snprintf(buffer, SIZE, "Buffer size: %zu bytes\n", mBufferSize);
-        result.append(buffer);
-        snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL));
-        result.append(buffer);
-        snprintf(buffer, SIZE, "Out channel count: %u\n", mReqChannelCount);
-        result.append(buffer);
-        snprintf(buffer, SIZE, "Out sample rate: %u\n", mReqSampleRate);
-        result.append(buffer);
+    if (mActiveTracks.size() > 0) {
+        fdprintf(fd, "  Buffer size: %zu bytes\n", mBufferSize);
     } else {
-        result.append("No active record client\n");
+        fdprintf(fd, "  No active record clients\n");
     }
 
-    write(fd, result.string(), result.size());
-
     dumpBase(fd, args);
 }
 
-void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args)
+void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
 
-    snprintf(buffer, SIZE, "Input thread %p tracks\n", this);
-    result.append(buffer);
-    RecordTrack::appendDumpHeader(result);
-    for (size_t i = 0; i < mTracks.size(); ++i) {
-        sp<RecordTrack> track = mTracks[i];
-        if (track != 0) {
-            track->dump(buffer, SIZE);
-            result.append(buffer);
+    size_t numtracks = mTracks.size();
+    size_t numactive = mActiveTracks.size();
+    size_t numactiveseen = 0;
+    fdprintf(fd, "  %d Tracks", numtracks);
+    if (numtracks) {
+        fdprintf(fd, " of which %d are active\n", numactive);
+        RecordTrack::appendDumpHeader(result);
+        for (size_t i = 0; i < numtracks ; ++i) {
+            sp<RecordTrack> track = mTracks[i];
+            if (track != 0) {
+                bool active = mActiveTracks.indexOf(track) >= 0;
+                if (active) {
+                    numactiveseen++;
+                }
+                track->dump(buffer, SIZE, active);
+                result.append(buffer);
+            }
         }
+    } else {
+        fdprintf(fd, "\n");
     }
 
-    if (mActiveTrack != 0) {
-        snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this);
+    if (numactiveseen != numactive) {
+        snprintf(buffer, SIZE, "  The following tracks are in the active list but"
+                " not in the track list\n");
         result.append(buffer);
         RecordTrack::appendDumpHeader(result);
-        mActiveTrack->dump(buffer, SIZE);
-        result.append(buffer);
+        for (size_t i = 0; i < numactive; ++i) {
+            sp<RecordTrack> track = mActiveTracks[i];
+            if (mTracks.indexOf(track) < 0) {
+                track->dump(buffer, SIZE, true);
+                result.append(buffer);
+            }
+        }
 
     }
     write(fd, result.string(), result.size());
 }
 
 // AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
+status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer(
+        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
 {
-    size_t framesReq = buffer->frameCount;
-    size_t framesReady = mFrameCount - mRsmpInIndex;
-    int channelCount;
-
-    if (framesReady == 0) {
-        mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize);
-        if (mBytesRead <= 0) {
-            if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) {
-                ALOGE("RecordThread::getNextBuffer() Error reading audio input");
-                // Force input into standby so that it tries to
-                // recover at next read attempt
-                inputStandBy();
-                usleep(kRecordThreadSleepUs);
-            }
-            buffer->raw = NULL;
-            buffer->frameCount = 0;
-            return NOT_ENOUGH_DATA;
-        }
-        mRsmpInIndex = 0;
-        framesReady = mFrameCount;
+    RecordTrack *activeTrack = mRecordTrack;
+    sp<ThreadBase> threadBase = activeTrack->mThread.promote();
+    if (threadBase == 0) {
+        buffer->frameCount = 0;
+        buffer->raw = NULL;
+        return NOT_ENOUGH_DATA;
+    }
+    RecordThread *recordThread = (RecordThread *) threadBase.get();
+    int32_t rear = recordThread->mRsmpInRear;
+    int32_t front = activeTrack->mRsmpInFront;
+    ssize_t filled = rear - front;
+    // FIXME should not be P2 (don't want to increase latency)
+    // FIXME if client not keeping up, discard
+    LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames));
+    // 'filled' may be non-contiguous, so return only the first contiguous chunk
+    front &= recordThread->mRsmpInFramesP2 - 1;
+    size_t part1 = recordThread->mRsmpInFramesP2 - front;
+    if (part1 > (size_t) filled) {
+        part1 = filled;
+    }
+    size_t ask = buffer->frameCount;
+    ALOG_ASSERT(ask > 0);
+    if (part1 > ask) {
+        part1 = ask;
+    }
+    if (part1 == 0) {
+        // Higher-level should keep mRsmpInBuffer full, and not call resampler if empty
+        LOG_ALWAYS_FATAL("RecordThread::getNextBuffer() starved");
+        buffer->raw = NULL;
+        buffer->frameCount = 0;
+        activeTrack->mRsmpInUnrel = 0;
+        return NOT_ENOUGH_DATA;
     }
 
-    if (framesReq > framesReady) {
-        framesReq = framesReady;
-    }
-
-    if (mChannelCount == 1 && mReqChannelCount == 2) {
-        channelCount = 1;
-    } else {
-        channelCount = 2;
-    }
-    buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount;
-    buffer->frameCount = framesReq;
+    buffer->raw = recordThread->mRsmpInBuffer + front * recordThread->mChannelCount;
+    buffer->frameCount = part1;
+    activeTrack->mRsmpInUnrel = part1;
     return NO_ERROR;
 }
 
 // AudioBufferProvider interface
-void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer(
+        AudioBufferProvider::Buffer* buffer)
 {
-    mRsmpInIndex += buffer->frameCount;
+    RecordTrack *activeTrack = mRecordTrack;
+    size_t stepCount = buffer->frameCount;
+    if (stepCount == 0) {
+        return;
+    }
+    ALOG_ASSERT(stepCount <= activeTrack->mRsmpInUnrel);
+    activeTrack->mRsmpInUnrel -= stepCount;
+    activeTrack->mRsmpInFront += stepCount;
+    buffer->raw = NULL;
     buffer->frameCount = 0;
 }
 
@@ -5063,11 +5488,14 @@
         AudioParameter param = AudioParameter(keyValuePair);
         int value;
         audio_format_t reqFormat = mFormat;
-        uint32_t reqSamplingRate = mReqSampleRate;
-        uint32_t reqChannelCount = mReqChannelCount;
+        uint32_t samplingRate = mSampleRate;
+        audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(mChannelCount);
 
+        // TODO Investigate when this code runs. Check with audio policy when a sample rate and
+        //      channel count change can be requested. Do we mandate the first client defines the
+        //      HAL sampling rate and channel count or do we allow changes on the fly?
         if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
-            reqSamplingRate = value;
+            samplingRate = value;
             reconfig = true;
         }
         if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
@@ -5079,14 +5507,19 @@
             }
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-            reqChannelCount = popcount(value);
-            reconfig = true;
+            audio_channel_mask_t mask = (audio_channel_mask_t) value;
+            if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) {
+                status = BAD_VALUE;
+            } else {
+                channelMask = mask;
+                reconfig = true;
+            }
         }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
             // size depends on frame count and correct behavior would not be guaranteed
             // if frame count is changed after track creation
-            if (mActiveTrack != 0) {
+            if (mActiveTracks.size() > 0) {
                 status = INVALID_OPERATION;
             } else {
                 reconfig = true;
@@ -5129,6 +5562,7 @@
             }
             mAudioSource = (audio_source_t)value;
         }
+
         if (status == NO_ERROR) {
             status = mInput->stream->common.set_parameters(&mInput->stream->common,
                     keyValuePair.string());
@@ -5142,14 +5576,15 @@
                     reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
                     reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
                     (mInput->stream->common.get_sample_rate(&mInput->stream->common)
-                            <= (2 * reqSamplingRate)) &&
+                            <= (2 * samplingRate)) &&
                     popcount(mInput->stream->common.get_channels(&mInput->stream->common))
                             <= FCC_2 &&
-                    (reqChannelCount <= FCC_2)) {
+                    (channelMask == AUDIO_CHANNEL_IN_MONO ||
+                            channelMask == AUDIO_CHANNEL_IN_STEREO)) {
                     status = NO_ERROR;
                 }
                 if (status == NO_ERROR) {
-                    readInputParameters();
+                    readInputParameters_l();
                     sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED);
                 }
             }
@@ -5179,9 +5614,9 @@
     return out_s8;
 }
 
-void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
+void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param __unused) {
     AudioSystem::OutputDescriptor desc;
-    void *param2 = NULL;
+    const void *param2 = NULL;
 
     switch (event) {
     case AudioSystem::INPUT_OPENED:
@@ -5201,53 +5636,35 @@
     mAudioFlinger->audioConfigChanged_l(event, mId, param2);
 }
 
-void AudioFlinger::RecordThread::readInputParameters()
+void AudioFlinger::RecordThread::readInputParameters_l()
 {
-    delete[] mRsmpInBuffer;
-    // mRsmpInBuffer is always assigned a new[] below
-    delete[] mRsmpOutBuffer;
-    mRsmpOutBuffer = NULL;
-    delete mResampler;
-    mResampler = NULL;
-
     mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
     mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
     mChannelCount = popcount(mChannelMask);
     mFormat = mInput->stream->common.get_format(&mInput->stream->common);
     if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGE("HAL format %d not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat);
+        ALOGE("HAL format %#x not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat);
     }
     mFrameSize = audio_stream_frame_size(&mInput->stream->common);
     mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
-    mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
+    // This is the formula for calculating the temporary buffer size.
+    // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
+    // 1 full output buffer, regardless of the alignment of the available input.
+    // The value is somewhat arbitrary, and could probably be even larger.
+    // A larger value should allow more old data to be read after a track calls start(),
+    // without increasing latency.
+    mRsmpInFrames = mFrameCount * 7;
+    mRsmpInFramesP2 = roundup(mRsmpInFrames);
+    delete[] mRsmpInBuffer;
+    // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
+    mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount];
 
-    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
-    {
-        int channelCount;
-        // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
-        // stereo to mono post process as the resampler always outputs stereo.
-        if (mChannelCount == 1 && mReqChannelCount == 2) {
-            channelCount = 1;
-        } else {
-            channelCount = 2;
-        }
-        mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
-        mResampler->setSampleRate(mSampleRate);
-        mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
-        mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2];
-
-        // optmization: if mono to mono, alter input frame count as if we were inputing
-        // stereo samples
-        if (mChannelCount == 1 && mReqChannelCount == 1) {
-            mFrameCount >>= 1;
-        }
-
-    }
-    mRsmpInIndex = mFrameCount;
+    // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
+    // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
 }
 
-unsigned int AudioFlinger::RecordThread::getInputFramesLost()
+uint32_t AudioFlinger::RecordThread::getInputFramesLost()
 {
     Mutex::Autolock _l(mLock);
     if (initCheck() != NO_ERROR) {
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index a2fb874..5617c0c 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -36,6 +36,8 @@
                 audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
     virtual             ~ThreadBase();
 
+    virtual status_t    readyToRun();
+
     void dumpBase(int fd, const Vector<String16>& args);
     void dumpEffectChains(int fd, const Vector<String16>& args);
 
@@ -63,7 +65,7 @@
     class IoConfigEvent : public ConfigEvent {
     public:
         IoConfigEvent(int event, int param) :
-            ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(event) {}
+            ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(param) {}
         virtual ~IoConfigEvent() {}
 
                 int event() const { return mEvent; }
@@ -141,6 +143,7 @@
                 void        sendIoConfigEvent_l(int event, int param = 0);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
                 void        processConfigEvents();
+                void        processConfigEvents_l();
 
                 // see note at declaration of mStandby, mOutDevice and mInDevice
                 bool        standby() const { return mStandby; }
@@ -156,7 +159,7 @@
                                     int sessionId,
                                     effect_descriptor_t *desc,
                                     int *enabled,
-                                    status_t *status);
+                                    status_t *status /*non-NULL*/);
                 void disconnectEffect(const sp< EffectModule>& effect,
                                       EffectHandle *handle,
                                       bool unpinIfLast);
@@ -198,13 +201,13 @@
                 // effect
                 void removeEffect_l(const sp< EffectModule>& effect);
                 // detach all tracks connected to an auxiliary effect
-    virtual     void detachAuxEffect_l(int effectId) {}
+    virtual     void detachAuxEffect_l(int effectId __unused) {}
                 // returns either EFFECT_SESSION if effects on this audio session exist in one
                 // chain, or TRACK_SESSION if tracks on this audio session exist, or both
                 virtual uint32_t hasAudioSession(int sessionId) const = 0;
                 // the value returned by default implementation is not important as the
                 // strategy is only meaningful for PlaybackThread which implements this method
-                virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; }
+                virtual uint32_t getStrategyForSession_l(int sessionId __unused) { return 0; }
 
                 // suspend or restore effect according to the type of effect passed. a NULL
                 // type pointer means suspend all effects in the session
@@ -267,14 +270,15 @@
 
                 const sp<AudioFlinger>  mAudioFlinger;
 
-                // updated by PlaybackThread::readOutputParameters() or
-                // RecordThread::readInputParameters()
+                // updated by PlaybackThread::readOutputParameters_l() or
+                // RecordThread::readInputParameters_l()
                 uint32_t                mSampleRate;
                 size_t                  mFrameCount;       // output HAL, direct output, record
                 audio_channel_mask_t    mChannelMask;
                 uint32_t                mChannelCount;
                 size_t                  mFrameSize;
                 audio_format_t          mFormat;
+                size_t                  mBufferSize;       // HAL buffer size for read() or write()
 
                 // Parameter sequence by client: binder thread calling setParameters():
                 //  1. Lock mLock
@@ -303,12 +307,12 @@
                 Vector<ConfigEvent *>     mConfigEvents;
 
                 // These fields are written and read by thread itself without lock or barrier,
-                // and read by other threads without lock or barrier via standby() , outDevice()
+                // and read by other threads without lock or barrier via standby(), outDevice()
                 // and inDevice().
                 // Because of the absence of a lock or barrier, any other thread that reads
                 // these fields must use the information in isolation, or be prepared to deal
                 // with possibility that it might be inconsistent with other information.
-                bool                    mStandby;   // Whether thread is currently in standby.
+                bool                    mStandby;     // Whether thread is currently in standby.
                 audio_devices_t         mOutDevice;   // output device
                 audio_devices_t         mInDevice;    // input device
                 audio_source_t          mAudioSource; // (see audio.h, audio_source_t)
@@ -358,7 +362,6 @@
                 void        dump(int fd, const Vector<String16>& args);
 
     // Thread virtuals
-    virtual     status_t    readyToRun();
     virtual     bool        threadLoop();
 
     // RefBase
@@ -391,7 +394,7 @@
     virtual     bool        waitingAsyncCallback();
     virtual     bool        waitingAsyncCallback_l();
     virtual     bool        shouldStandby_l();
-
+    virtual     void        onAddNewTrack_l();
 
     // ThreadBase virtuals
     virtual     void        preExit();
@@ -419,13 +422,13 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                size_t frameCount,
+                                size_t *pFrameCount,
                                 const sp<IMemory>& sharedBuffer,
                                 int sessionId,
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int uid,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
                 AudioStreamOut* getOutput() const;
                 AudioStreamOut* clearOutput();
@@ -447,7 +450,11 @@
     virtual     String8     getParameters(const String8& keys);
     virtual     void        audioConfigChanged_l(int event, int param = 0);
                 status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
-                int16_t     *mixBuffer() const { return mMixBuffer; };
+                // FIXME rename mixBuffer() to sinkBuffer() and remove int16_t* dependency.
+                // Consider also removing and passing an explicit mMainBuffer initialization
+                // parameter to AF::PlaybackThread::Track::Track().
+                int16_t     *mixBuffer() const {
+                    return reinterpret_cast<int16_t *>(mSinkBuffer); };
 
     virtual     void detachAuxEffect_l(int effectId);
                 status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track,
@@ -475,11 +482,68 @@
                 status_t         getTimestamp_l(AudioTimestamp& timestamp);
 
 protected:
-    // updated by readOutputParameters()
+    // updated by readOutputParameters_l()
     size_t                          mNormalFrameCount;  // normal mixer and effects
 
-    int16_t*                        mMixBuffer;         // frame size aligned mix buffer
-    int8_t*                         mAllocMixBuffer;    // mixer buffer allocation address
+    void*                           mSinkBuffer;         // frame size aligned sink buffer
+
+    // TODO:
+    // Rearrange the buffer info into a struct/class with
+    // clear, copy, construction, destruction methods.
+    //
+    // mSinkBuffer also has associated with it:
+    //
+    // mSinkBufferSize: Sink Buffer Size
+    // mFormat: Sink Buffer Format
+
+    // Mixer Buffer (mMixerBuffer*)
+    //
+    // In the case of floating point or multichannel data, which is not in the
+    // sink format, it is required to accumulate in a higher precision or greater channel count
+    // buffer before downmixing or data conversion to the sink buffer.
+
+    // Set to "true" to enable the Mixer Buffer otherwise mixer output goes to sink buffer.
+    bool                            mMixerBufferEnabled;
+
+    // Storage, 32 byte aligned (may make this alignment a requirement later).
+    // Due to constraints on mNormalFrameCount, the buffer size is a multiple of 16 frames.
+    void*                           mMixerBuffer;
+
+    // Size of mMixerBuffer in bytes: mNormalFrameCount * #channels * sampsize.
+    size_t                          mMixerBufferSize;
+
+    // The audio format of mMixerBuffer. Set to AUDIO_FORMAT_PCM_(FLOAT|16_BIT) only.
+    audio_format_t                  mMixerBufferFormat;
+
+    // An internal flag set to true by MixerThread::prepareTracks_l()
+    // when mMixerBuffer contains valid data after mixing.
+    bool                            mMixerBufferValid;
+
+    // Effects Buffer (mEffectsBuffer*)
+    //
+    // In the case of effects data, which is not in the sink format,
+    // it is required to accumulate in a different buffer before data conversion
+    // to the sink buffer.
+
+    // Set to "true" to enable the Effects Buffer otherwise effects output goes to sink buffer.
+    bool                            mEffectBufferEnabled;
+
+    // Storage, 32 byte aligned (may make this alignment a requirement later).
+    // Due to constraints on mNormalFrameCount, the buffer size is a multiple of 16 frames.
+    void*                           mEffectBuffer;
+
+    // Size of mEffectsBuffer in bytes: mNormalFrameCount * #channels * sampsize.
+    size_t                          mEffectBufferSize;
+
+    // The audio format of mEffectsBuffer. Set to AUDIO_FORMAT_PCM_16_BIT only.
+    audio_format_t                  mEffectBufferFormat;
+
+    // An internal flag set to true by MixerThread::prepareTracks_l()
+    // when mEffectsBuffer contains valid data after mixing.
+    //
+    // When this is set, all mixer data is routed into the effects buffer
+    // for any processing (including output processing).
+    bool                            mEffectBufferValid;
 
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
@@ -539,7 +603,7 @@
     void        removeTrack_l(const sp<Track>& track);
     void        broadcast_l();
 
-    void        readOutputParameters();
+    void        readOutputParameters_l();
 
     virtual void dumpInternals(int fd, const Vector<String16>& args);
     void        dumpTracks(int fd, const Vector<String16>& args);
@@ -558,7 +622,7 @@
 
     // FIXME rename these former local variables of threadLoop to standard "m" names
     nsecs_t                         standbyTime;
-    size_t                          mixBufferSize;
+    size_t                          mSinkBufferSize;
 
     // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l()
     uint32_t                        activeSleepTime;
@@ -623,13 +687,12 @@
     sp<NBLog::Writer>       mFastMixerNBLogWriter;
 public:
     virtual     bool        hasFastMixer() const = 0;
-    virtual     FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const
+    virtual     FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex __unused) const
                                 { FastTrackUnderruns dummy; return dummy; }
 
 protected:
                 // accessed by both binder threads and within threadLoop(), lock on mutex needed
                 unsigned    mFastTrackAvailMask;    // bit i set if fast track [i] is available
-    virtual     void        flushOutput_l();
 
 private:
     // timestamp latch:
@@ -748,11 +811,11 @@
     // threadLoop snippets
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
     virtual     void        threadLoop_exit();
-    virtual     void        flushOutput_l();
 
     virtual     bool        waitingAsyncCallback();
     virtual     bool        waitingAsyncCallback_l();
     virtual     bool        shouldStandby_l();
+    virtual     void        onAddNewTrack_l();
 
 private:
                 void        flushHw_l();
@@ -838,17 +901,28 @@
 
 
 // record thread
-class RecordThread : public ThreadBase, public AudioBufferProvider
-                        // derives from AudioBufferProvider interface for use by resampler
+class RecordThread : public ThreadBase
 {
 public:
 
+    class RecordTrack;
+    class ResamplerBufferProvider : public AudioBufferProvider
+                        // derives from AudioBufferProvider interface for use by resampler
+    {
+    public:
+        ResamplerBufferProvider(RecordTrack* recordTrack) : mRecordTrack(recordTrack) { }
+        virtual ~ResamplerBufferProvider() { }
+        // AudioBufferProvider interface
+        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
+        virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
+    private:
+        RecordTrack * const mRecordTrack;
+    };
+
 #include "RecordTracks.h"
 
             RecordThread(const sp<AudioFlinger>& audioFlinger,
                     AudioStreamIn *input,
-                    uint32_t sampleRate,
-                    audio_channel_mask_t channelMask,
                     audio_io_handle_t id,
                     audio_devices_t outDevice,
                     audio_devices_t inDevice
@@ -867,23 +941,23 @@
 
     // Thread virtuals
     virtual bool        threadLoop();
-    virtual status_t    readyToRun();
 
     // RefBase
     virtual void        onFirstRef();
 
     virtual status_t    initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
+
             sp<AudioFlinger::RecordThread::RecordTrack>  createRecordTrack_l(
                     const sp<AudioFlinger::Client>& client,
                     uint32_t sampleRate,
                     audio_format_t format,
                     audio_channel_mask_t channelMask,
-                    size_t frameCount,
+                    size_t *pFrameCount,
                     int sessionId,
                     int uid,
                     IAudioFlinger::track_flags_t *flags,
                     pid_t tid,
-                    status_t *status);
+                    status_t *status /*non-NULL*/);
 
             status_t    start(RecordTrack* recordTrack,
                               AudioSystem::sync_event_t event,
@@ -897,15 +971,12 @@
             AudioStreamIn* clearInput();
             virtual audio_stream_t* stream() const;
 
-    // AudioBufferProvider interface
-    virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
-    virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
     virtual bool        checkForNewParameters_l();
     virtual String8     getParameters(const String8& keys);
     virtual void        audioConfigChanged_l(int event, int param = 0);
-            void        readInputParameters();
-    virtual unsigned int  getInputFramesLost();
+            void        readInputParameters_l();
+    virtual uint32_t    getInputFramesLost();
 
     virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
     virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
@@ -920,44 +991,33 @@
     virtual bool     isValidSyncEvent(const sp<SyncEvent>& event) const;
 
     static void syncStartEventCallback(const wp<SyncEvent>& event);
-           void handleSyncStartEvent(const sp<SyncEvent>& event);
 
     virtual size_t      frameCount() const { return mFrameCount; }
-            bool        hasFastRecorder() const { return false; }
+            bool        hasFastCapture() const { return false; }
 
 private:
-            void clearSyncStartEvent();
-
             // Enter standby if not already in standby, and set mStandby flag
-            void standby();
+            void    standbyIfNotAlreadyInStandby();
 
             // Call the HAL standby method unconditionally, and don't change mStandby flag
-            void inputStandBy();
+            void    inputStandBy();
 
             AudioStreamIn                       *mInput;
             SortedVector < sp<RecordTrack> >    mTracks;
-            // mActiveTrack has dual roles:  it indicates the current active track, and
+            // mActiveTracks has dual roles:  it indicates the current active track(s), and
             // is used together with mStartStopCond to indicate start()/stop() progress
-            sp<RecordTrack>                     mActiveTrack;
+            SortedVector< sp<RecordTrack> >     mActiveTracks;
+            // generation counter for mActiveTracks
+            int                                 mActiveTracksGen;
             Condition                           mStartStopCond;
 
-            // updated by RecordThread::readInputParameters()
-            AudioResampler                      *mResampler;
-            // interleaved stereo pairs of fixed-point signed Q19.12
-            int32_t                             *mRsmpOutBuffer;
-            int16_t                             *mRsmpInBuffer; // [mFrameCount * mChannelCount]
-            size_t                              mRsmpInIndex;
-            size_t                              mBufferSize;    // stream buffer size for read()
-            const uint32_t                      mReqChannelCount;
-            const uint32_t                      mReqSampleRate;
-            ssize_t                             mBytesRead;
-            // sync event triggering actual audio capture. Frames read before this event will
-            // be dropped and therefore not read by the application.
-            sp<SyncEvent>                       mSyncStartEvent;
-            // number of captured frames to drop after the start sync event has been received.
-            // when < 0, maximum frames to drop before starting capture even if sync event is
-            // not received
-            ssize_t                             mFramestoDrop;
+            // resampler converts input at HAL Hz to output at AudioRecord client Hz
+            int16_t                             *mRsmpInBuffer; // see new[] for details on the size
+            size_t                              mRsmpInFrames;  // size of resampler input in frames
+            size_t                              mRsmpInFramesP2;// size rounded up to a power-of-2
+
+            // rolling index that is never cleared
+            int32_t                             mRsmpInRear;    // last filled frame + 1
 
             // For dumpsys
             const sp<NBAIO_Sink>                mTeeSink;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index cd201d9..58705c4 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -34,7 +34,9 @@
         RESUMING,
         ACTIVE,
         PAUSING,
-        PAUSED
+        PAUSED,
+        STARTING_1,     // for RecordTrack only
+        STARTING_2,     // for RecordTrack only
     };
 
                         TrackBase(ThreadBase *thread,
@@ -48,6 +50,7 @@
                                 int uid,
                                 bool isOut);
     virtual             ~TrackBase();
+    virtual status_t    initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; }
 
     virtual status_t    start(AudioSystem::sync_event_t event,
                              int triggerSession) = 0;
@@ -78,15 +81,6 @@
 
     virtual uint32_t sampleRate() const { return mSampleRate; }
 
-    // Return a pointer to the start of a contiguous slice of the track buffer.
-    // Parameter 'offset' is the requested start position, expressed in
-    // monotonically increasing frame units relative to the track epoch.
-    // Parameter 'frames' is the requested length, also in frame units.
-    // Always returns non-NULL.  It is the caller's responsibility to
-    // verify that this will be successful; the result of calling this
-    // function with invalid 'offset' or 'frames' is undefined.
-    void* getBuffer(uint32_t offset, uint32_t frames) const;
-
     bool isStopped() const {
         return (mState == STOPPED || mState == FLUSHED);
     }
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index d07113c..1064fd1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -116,12 +116,11 @@
 
     if (client != 0) {
         mCblkMemory = client->heap()->allocate(size);
-        if (mCblkMemory != 0) {
-            mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
-            // can't assume mCblk != NULL
-        } else {
+        if (mCblkMemory == 0 ||
+                (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
             ALOGE("not enough memory for AudioTrack size=%u", size);
             client->heap()->dump("AudioTrack");
+            mCblkMemory.clear();
             return;
         }
     } else {
@@ -134,7 +133,6 @@
     if (mCblk != NULL) {
         new(mCblk) audio_track_cblk_t();
         // clear all buffers
-        mCblk->frameCount_ = frameCount;
         if (sharedBuffer == 0) {
             mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
             memset(mBuffer, 0, bufferSize);
@@ -148,7 +146,7 @@
 #ifdef TEE_SINK
         if (mTeeSinkTrackEnabled) {
             NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
-            if (pipeFormat != Format_Invalid) {
+            if (Format_isValid(pipeFormat)) {
                 Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
                 size_t numCounterOffers = 0;
                 const NBAIO_Format offers[1] = {pipeFormat};
@@ -275,6 +273,11 @@
     if (!mTrack->isTimedTrack())
         return INVALID_OPERATION;
 
+    if (buffer == 0 || buffer->pointer() == NULL) {
+        ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()");
+        return BAD_VALUE;
+    }
+
     PlaybackThread::TimedTrack* tt =
             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
     return tt->queueTimedBuffer(buffer, pts);
@@ -344,41 +347,42 @@
     mCachedVolume(1.0),
     mIsInvalid(false),
     mAudioTrackServerProxy(NULL),
-    mResumeToStopping(false)
+    mResumeToStopping(false),
+    mFlushHwPending(false)
 {
-    if (mCblk != NULL) {
-        if (sharedBuffer == 0) {
-            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
-                    mFrameSize);
-        } else {
-            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
-                    mFrameSize);
-        }
-        mServerProxy = mAudioTrackServerProxy;
-        // to avoid leaking a track name, do not allocate one unless there is an mCblk
-        mName = thread->getTrackName_l(channelMask, sessionId);
-        if (mName < 0) {
-            ALOGE("no more track names available");
-            return;
-        }
-        // only allocate a fast track index if we were able to allocate a normal track name
-        if (flags & IAudioFlinger::TRACK_FAST) {
-            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
-            ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
-            int i = __builtin_ctz(thread->mFastTrackAvailMask);
-            ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
-            // FIXME This is too eager.  We allocate a fast track index before the
-            //       fast track becomes active.  Since fast tracks are a scarce resource,
-            //       this means we are potentially denying other more important fast tracks from
-            //       being created.  It would be better to allocate the index dynamically.
-            mFastIndex = i;
-            // Read the initial underruns because this field is never cleared by the fast mixer
-            mObservedUnderruns = thread->getFastTrackUnderruns(i);
-            thread->mFastTrackAvailMask &= ~(1 << i);
-        }
+    if (mCblk == NULL) {
+        return;
     }
-    ALOGV("Track constructor name %d, calling pid %d", mName,
-            IPCThreadState::self()->getCallingPid());
+
+    if (sharedBuffer == 0) {
+        mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
+                mFrameSize);
+    } else {
+        mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
+                mFrameSize);
+    }
+    mServerProxy = mAudioTrackServerProxy;
+
+    mName = thread->getTrackName_l(channelMask, sessionId);
+    if (mName < 0) {
+        ALOGE("no more track names available");
+        return;
+    }
+    // only allocate a fast track index if we were able to allocate a normal track name
+    if (flags & IAudioFlinger::TRACK_FAST) {
+        mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
+        ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
+        int i = __builtin_ctz(thread->mFastTrackAvailMask);
+        ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
+        // FIXME This is too eager.  We allocate a fast track index before the
+        //       fast track becomes active.  Since fast tracks are a scarce resource,
+        //       this means we are potentially denying other more important fast tracks from
+        //       being created.  It would be better to allocate the index dynamically.
+        mFastIndex = i;
+        // Read the initial underruns because this field is never cleared by the fast mixer
+        mObservedUnderruns = thread->getFastTrackUnderruns(i);
+        thread->mFastTrackAvailMask &= ~(1 << i);
+    }
 }
 
 AudioFlinger::PlaybackThread::Track::~Track()
@@ -396,6 +400,15 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::initCheck() const
+{
+    status_t status = TrackBase::initCheck();
+    if (status == NO_ERROR && mName < 0) {
+        status = NO_MEMORY;
+    }
+    return status;
+}
+
 void AudioFlinger::PlaybackThread::Track::destroy()
 {
     // NOTE: destroyTrack_l() can remove a strong reference to this Track
@@ -422,17 +435,19 @@
 
 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
-    result.append("   Name Client Type      Fmt Chn mask Session fCount S F SRate  "
+    result.append("    Name Active Client Type      Fmt Chn mask Session fCount S F SRate  "
                   "L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt\n");
 }
 
-void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
+void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active)
 {
     uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
     if (isFastTrack()) {
-        sprintf(buffer, "   F %2d", mFastIndex);
+        sprintf(buffer, "    F %2d", mFastIndex);
+    } else if (mName >= AudioMixer::TRACK0) {
+        sprintf(buffer, "    %4d", mName - AudioMixer::TRACK0);
     } else {
-        sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
+        sprintf(buffer, "    none");
     }
     track_state state = mState;
     char stateChar;
@@ -487,8 +502,9 @@
         nowInUnderrun = '?';
         break;
     }
-    snprintf(&buffer[7], size-7, " %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g  "
+    snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g  "
                                  "%08X %p %p 0x%03X %9u%c\n",
+            active ? "yes" : "no",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
             mFormat,
@@ -514,7 +530,7 @@
 
 // AudioBufferProvider interface
 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
-        AudioBufferProvider::Buffer* buffer, int64_t pts)
+        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
 {
     ServerProxy::Buffer buf;
     size_t desiredFrames = buffer->frameCount;
@@ -551,7 +567,14 @@
 
 // Don't call for fast tracks; the framesReady() could result in priority inversion
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
-    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing() || isStopping()) {
+    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
+        return true;
+    }
+
+    if (isStopping()) {
+        if (framesReady() > 0) {
+            mFillingUpStatus = FS_FILLED;
+        }
         return true;
     }
 
@@ -564,8 +587,8 @@
     return false;
 }
 
-status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
-                                                    int triggerSession)
+status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
+                                                    int triggerSession __unused)
 {
     status_t status = NO_ERROR;
     ALOGV("start(%d), calling pid %d session %d",
@@ -588,7 +611,10 @@
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
 
-        if (state == PAUSED) {
+        // initial state-stopping. next state-pausing.
+        // What if resume is called ?
+
+        if (state == PAUSED || state == PAUSING) {
             if (mResumeToStopping) {
                 // happened we need to resume to STOPPING_1
                 mState = TrackBase::STOPPING_1;
@@ -719,6 +745,7 @@
                 mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
             }
 
+            mFlushHwPending = true;
             mResumeToStopping = false;
         } else {
             if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
@@ -739,11 +766,19 @@
         // Prevent flush being lost if the track is flushed and then resumed
         // before mixer thread can run. This is important when offloading
         // because the hardware buffer could hold a large amount of audio
-        playbackThread->flushOutput_l();
         playbackThread->broadcast_l();
     }
 }
 
+// must be called with thread lock held
+void AudioFlinger::PlaybackThread::Track::flushAck()
+{
+    if (!isOffloaded())
+        return;
+
+    mFlushHwPending = false;
+}
+
 void AudioFlinger::PlaybackThread::Track::reset()
 {
     // Do not reset twice to avoid discarding data written just after a flush and before
@@ -966,6 +1001,33 @@
     }
 }
 
+//To be called with thread lock held
+bool AudioFlinger::PlaybackThread::Track::isResumePending() {
+
+    if (mState == RESUMING)
+        return true;
+    /* Resume is pending if track was stopping before pause was called */
+    if (mState == STOPPING_1 &&
+        mResumeToStopping)
+        return true;
+
+    return false;
+}
+
+//To be called with thread lock held
+void AudioFlinger::PlaybackThread::Track::resumeAck() {
+
+
+    if (mState == RESUMING)
+        mState = ACTIVE;
+
+    // Other possibility of  pending resume is stopping_1 state
+    // Do not update the state from stopping as this prevents
+    // drain being called.
+    if (mState == STOPPING_1) {
+        mResumeToStopping = false;
+    }
+}
 // ----------------------------------------------------------------------------
 
 sp<AudioFlinger::PlaybackThread::TimedTrack>
@@ -979,7 +1041,8 @@
             size_t frameCount,
             const sp<IMemory>& sharedBuffer,
             int sessionId,
-            int uid) {
+            int uid)
+{
     if (!client->reserveTimedTrack())
         return 0;
 
@@ -1045,15 +1108,14 @@
 
         mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
                                               "AudioFlingerTimed");
-        if (mTimedMemoryDealer == NULL)
+        if (mTimedMemoryDealer == NULL) {
             return NO_MEMORY;
+        }
     }
 
     sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
-    if (newBuffer == NULL) {
-        newBuffer = mTimedMemoryDealer->allocate(size);
-        if (newBuffer == NULL)
-            return NO_MEMORY;
+    if (newBuffer == 0 || newBuffer->pointer() == NULL) {
+        return NO_MEMORY;
     }
 
     *buffer = newBuffer;
@@ -1152,7 +1214,7 @@
 
 void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
         const TimedBuffer& buf,
-        const char* logTag) {
+        const char* logTag __unused) {
     uint32_t bufBytes        = buf.buffer()->size();
     uint32_t consumedAlready = buf.position();
 
@@ -1463,7 +1525,7 @@
             mTrimQueueHeadOnRelease = false;
         }
     } else {
-        LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
+        LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
                   " buffers in the timed buffer queue");
     }
 
@@ -1504,9 +1566,9 @@
         mOutBuffer.frameCount = 0;
         playbackThread->mTracks.add(this);
         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
-                "mCblk->frameCount_ %u, mChannelMask 0x%08x",
+                "frameCount %u, mChannelMask 0x%08x",
                 mCblk, mBuffer,
-                mCblk->frameCount_, mChannelMask);
+                frameCount, mChannelMask);
         // since client and server are in the same process,
         // the buffer has the same virtual address on both sides
         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
@@ -1748,7 +1810,7 @@
 
 // ----------------------------------------------------------------------------
 
-// RecordTrack constructor must be called with AudioFlinger::mLock held
+// RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
             RecordThread *thread,
             const sp<Client>& client,
@@ -1760,24 +1822,40 @@
             int uid)
     :   TrackBase(thread, client, sampleRate, format,
                   channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/),
-        mOverflow(false)
+        mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
+        // See real initialization of mRsmpInFront at RecordThread::start()
+        mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
 {
-    ALOGV("RecordTrack constructor");
-    if (mCblk != NULL) {
-        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
-                mFrameSize);
-        mServerProxy = mAudioRecordServerProxy;
+    if (mCblk == NULL) {
+        return;
+    }
+
+    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
+
+    uint32_t channelCount = popcount(channelMask);
+    // FIXME I don't understand either of the channel count checks
+    if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 &&
+            channelCount <= FCC_2) {
+        // sink SR
+        mResampler = AudioResampler::create(16, thread->mChannelCount, sampleRate);
+        // source SR
+        mResampler->setSampleRate(thread->mSampleRate);
+        mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
+        mResamplerBufferProvider = new ResamplerBufferProvider(this);
     }
 }
 
 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
 {
     ALOGV("%s", __func__);
+    delete mResampler;
+    delete[] mRsmpOutBuffer;
+    delete mResamplerBufferProvider;
 }
 
 // AudioBufferProvider interface
 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
-        int64_t pts)
+        int64_t pts __unused)
 {
     ServerProxy::Buffer buf;
     buf.mFrameCount = buffer->frameCount;
@@ -1845,19 +1923,45 @@
 
 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("Client Fmt Chn mask Session S   Server fCount\n");
+    result.append("    Active Client Fmt Chn mask Session S   Server fCount Resampling\n");
 }
 
-void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
+void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
 {
-    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6zu\n",
+    snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %10d\n",
+            active ? "yes" : "no",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
             mChannelMask,
             mSessionId,
             mState,
             mCblk->mServer,
-            mFrameCount);
+            mFrameCount,
+            mResampler != NULL);
+
+}
+
+void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
+{
+    if (event == mSyncStartEvent) {
+        ssize_t framesToDrop = 0;
+        sp<ThreadBase> threadBase = mThread.promote();
+        if (threadBase != 0) {
+            // TODO: use actual buffer filling status instead of 2 buffers when info is available
+            // from audio HAL
+            framesToDrop = threadBase->mFrameCount * 2;
+        }
+        mFramesToDrop = framesToDrop;
+    }
+}
+
+void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
+{
+    if (mSyncStartEvent != 0) {
+        mSyncStartEvent->cancel();
+        mSyncStartEvent.clear();
+    }
+    mFramesToDrop = 0;
 }
 
 }; // namespace android
diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp
index 7a314cf..e14b4ae 100644
--- a/services/audioflinger/test-resample.cpp
+++ b/services/audioflinger/test-resample.cpp
@@ -24,81 +24,112 @@
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <errno.h>
+#include <inttypes.h>
 #include <time.h>
 #include <math.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <utils/Vector.h>
 
 using namespace android;
 
-struct HeaderWav {
-    HeaderWav(size_t size, int nc, int sr, int bits) {
-        strncpy(RIFF, "RIFF", 4);
-        chunkSize = size + sizeof(HeaderWav);
-        strncpy(WAVE, "WAVE", 4);
-        strncpy(fmt,  "fmt ", 4);
-        fmtSize = 16;
-        audioFormat = 1;
-        numChannels = nc;
-        samplesRate = sr;
-        byteRate = sr * numChannels * (bits/8);
-        align = nc*(bits/8);
-        bitsPerSample = bits;
-        strncpy(data, "data", 4);
-        dataSize = size;
-    }
-
-    char RIFF[4];           // RIFF
-    uint32_t chunkSize;     // File size
-    char WAVE[4];        // WAVE
-    char fmt[4];            // fmt\0
-    uint32_t fmtSize;       // fmt size
-    uint16_t audioFormat;   // 1=PCM
-    uint16_t numChannels;   // num channels
-    uint32_t samplesRate;   // sample rate in hz
-    uint32_t byteRate;      // Bps
-    uint16_t align;         // 2=16-bit mono, 4=16-bit stereo
-    uint16_t bitsPerSample; // bits per sample
-    char data[4];           // "data"
-    uint32_t dataSize;      // size
-};
+static bool gVerbose = false;
 
 static int usage(const char* name) {
-    fprintf(stderr,"Usage: %s [-p] [-h] [-s] [-q {dq|lq|mq|hq|vhq}] [-i input-sample-rate] "
-                   "[-o output-sample-rate] [<input-file>] <output-file>\n", name);
+    fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]"
+                   " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]"
+                   " [-i input-sample-rate] [-o output-sample-rate]"
+                   " [-O csv] [-P csv] [<input-file>]"
+                   " <output-file>\n", name);
     fprintf(stderr,"    -p    enable profiling\n");
-    fprintf(stderr,"    -h    create wav file\n");
-    fprintf(stderr,"    -s    stereo\n");
+    fprintf(stderr,"    -f    enable filter profiling\n");
+    fprintf(stderr,"    -F    enable floating point -q {dlq|dmq|dhq} only");
+    fprintf(stderr,"    -v    verbose : log buffer provider calls\n");
+    fprintf(stderr,"    -c    # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n");
     fprintf(stderr,"    -q    resampler quality\n");
     fprintf(stderr,"              dq  : default quality\n");
     fprintf(stderr,"              lq  : low quality\n");
     fprintf(stderr,"              mq  : medium quality\n");
     fprintf(stderr,"              hq  : high quality\n");
     fprintf(stderr,"              vhq : very high quality\n");
-    fprintf(stderr,"    -i    input file sample rate\n");
+    fprintf(stderr,"              dlq : dynamic low quality\n");
+    fprintf(stderr,"              dmq : dynamic medium quality\n");
+    fprintf(stderr,"              dhq : dynamic high quality\n");
+    fprintf(stderr,"    -i    input file sample rate (ignored if input file is specified)\n");
     fprintf(stderr,"    -o    output file sample rate\n");
+    fprintf(stderr,"    -O    # frames output per call to resample() in CSV format\n");
+    fprintf(stderr,"    -P    # frames provided per call to resample() in CSV format\n");
     return -1;
 }
 
-int main(int argc, char* argv[]) {
+// Convert a list of integers in CSV format to a Vector of those values.
+// Returns the number of elements in the list, or -1 on error.
+int parseCSV(const char *string, Vector<int>& values)
+{
+    // pass 1: count the number of values and do syntax check
+    size_t numValues = 0;
+    bool hadDigit = false;
+    for (const char *p = string; ; ) {
+        switch (*p++) {
+        case '0': case '1': case '2': case '3': case '4':
+        case '5': case '6': case '7': case '8': case '9':
+            hadDigit = true;
+            break;
+        case '\0':
+            if (hadDigit) {
+                // pass 2: allocate and initialize vector of values
+                values.resize(++numValues);
+                values.editItemAt(0) = atoi(p = optarg);
+                for (size_t i = 1; i < numValues; ) {
+                    if (*p++ == ',') {
+                        values.editItemAt(i++) = atoi(p);
+                    }
+                }
+                return numValues;
+            }
+            // fall through
+        case ',':
+            if (hadDigit) {
+                hadDigit = false;
+                numValues++;
+                break;
+            }
+            // fall through
+        default:
+            return -1;
+        }
+    }
+}
 
+int main(int argc, char* argv[]) {
     const char* const progname = argv[0];
-    bool profiling = false;
-    bool writeHeader = false;
+    bool profileResample = false;
+    bool profileFilter = false;
+    bool useFloat = false;
     int channels = 1;
     int input_freq = 0;
     int output_freq = 0;
     AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY;
+    Vector<int> Ovalues;
+    Vector<int> Pvalues;
 
     int ch;
-    while ((ch = getopt(argc, argv, "phsq:i:o:")) != -1) {
+    while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) {
         switch (ch) {
         case 'p':
-            profiling = true;
+            profileResample = true;
             break;
-        case 'h':
-            writeHeader = true;
+        case 'f':
+            profileFilter = true;
             break;
-        case 's':
-            channels = 2;
+        case 'F':
+            useFloat = true;
+            break;
+        case 'v':
+            gVerbose = true;
+            break;
+        case 'c':
+            channels = atoi(optarg);
             break;
         case 'q':
             if (!strcmp(optarg, "dq"))
@@ -111,6 +142,12 @@
                 quality = AudioResampler::HIGH_QUALITY;
             else if (!strcmp(optarg, "vhq"))
                 quality = AudioResampler::VERY_HIGH_QUALITY;
+            else if (!strcmp(optarg, "dlq"))
+                quality = AudioResampler::DYN_LOW_QUALITY;
+            else if (!strcmp(optarg, "dmq"))
+                quality = AudioResampler::DYN_MED_QUALITY;
+            else if (!strcmp(optarg, "dhq"))
+                quality = AudioResampler::DYN_HIGH_QUALITY;
             else {
                 usage(progname);
                 return -1;
@@ -122,12 +159,35 @@
         case 'o':
             output_freq = atoi(optarg);
             break;
+        case 'O':
+            if (parseCSV(optarg, Ovalues) < 0) {
+                fprintf(stderr, "incorrect syntax for -O option\n");
+                return -1;
+            }
+            break;
+        case 'P':
+            if (parseCSV(optarg, Pvalues) < 0) {
+                fprintf(stderr, "incorrect syntax for -P option\n");
+                return -1;
+            }
+            break;
         case '?':
         default:
             usage(progname);
             return -1;
         }
     }
+
+    if (channels < 1
+            || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 2 : 8)) {
+        fprintf(stderr, "invalid number of audio channels %d\n", channels);
+        return -1;
+    }
+    if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) {
+        fprintf(stderr, "float processing is only possible for dynamic resamplers\n");
+        return -1;
+    }
+
     argc -= optind;
     argv += optind;
 
@@ -148,25 +208,22 @@
     size_t input_size;
     void* input_vaddr;
     if (argc == 2) {
-        struct stat st;
-        if (stat(file_in, &st) < 0) {
-            fprintf(stderr, "stat: %s\n", strerror(errno));
-            return -1;
+        SF_INFO info;
+        info.format = 0;
+        SNDFILE *sf = sf_open(file_in, SFM_READ, &info);
+        if (sf == NULL) {
+            perror(file_in);
+            return EXIT_FAILURE;
         }
-
-        int input_fd = open(file_in, O_RDONLY);
-        if (input_fd < 0) {
-            fprintf(stderr, "open: %s\n", strerror(errno));
-            return -1;
-        }
-
-        input_size = st.st_size;
-        input_vaddr = mmap(0, input_size, PROT_READ, MAP_PRIVATE, input_fd, 0);
-        if (input_vaddr == MAP_FAILED ) {
-            fprintf(stderr, "mmap: %s\n", strerror(errno));
-            return -1;
-        }
+        input_size = info.frames * info.channels * sizeof(short);
+        input_vaddr = malloc(input_size);
+        (void) sf_readf_short(sf, (short *) input_vaddr, info.frames);
+        sf_close(sf);
+        channels = info.channels;
+        input_freq = info.samplerate;
     } else {
+        // data for testing is exactly (input sampling rate/1000)/2 seconds
+        // so 44.1khz input is 22.05 seconds
         double k = 1000; // Hz / s
         double time = (input_freq / 2) / k;
         size_t input_frames = size_t(input_freq * time);
@@ -177,98 +234,287 @@
             double t = double(i) / input_freq;
             double y = sin(M_PI * k * t * t);
             int16_t yi = floor(y * 32767.0 + 0.5);
-            for (size_t j=0 ; j<(size_t)channels ; j++) {
-                in[i*channels + j] = yi / (1+j);
+            for (int j = 0; j < channels; j++) {
+                in[i*channels + j] = yi / (1 + j);
             }
         }
     }
+    size_t input_framesize = channels * sizeof(int16_t);
+    size_t input_frames = input_size / input_framesize;
+
+    // For float processing, convert input int16_t to float array
+    if (useFloat) {
+        void *new_vaddr;
+
+        input_framesize = channels * sizeof(float);
+        input_size = input_frames * input_framesize;
+        new_vaddr = malloc(input_size);
+        memcpy_to_float_from_i16(reinterpret_cast<float*>(new_vaddr),
+                reinterpret_cast<int16_t*>(input_vaddr), input_frames * channels);
+        free(input_vaddr);
+        input_vaddr = new_vaddr;
+    }
 
     // ----------------------------------------------------------
 
     class Provider: public AudioBufferProvider {
-        int16_t* mAddr;
-        size_t mNumFrames;
+        const void*     mAddr;      // base address
+        const size_t    mNumFrames; // total frames
+        const size_t    mFrameSize; // size of each frame in bytes
+        size_t          mNextFrame; // index of next frame to provide
+        size_t          mUnrel;     // number of frames not yet released
+        const Vector<int> mPvalues; // number of frames provided per call
+        size_t          mNextPidx;  // index of next entry in mPvalues to use
     public:
-        Provider(const void* addr, size_t size, int channels) {
-            mAddr = (int16_t*) addr;
-            mNumFrames = size / (channels*sizeof(int16_t));
+        Provider(const void* addr, size_t frames, size_t frameSize, const Vector<int>& Pvalues)
+          : mAddr(addr),
+            mNumFrames(frames),
+            mFrameSize(frameSize),
+            mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) {
         }
         virtual status_t getNextBuffer(Buffer* buffer,
                 int64_t pts = kInvalidPTS) {
-            buffer->frameCount = mNumFrames;
-            buffer->i16 = mAddr;
-            return NO_ERROR;
+            (void)pts; // suppress warning
+            size_t requestedFrames = buffer->frameCount;
+            if (requestedFrames > mNumFrames - mNextFrame) {
+                buffer->frameCount = mNumFrames - mNextFrame;
+            }
+            if (!mPvalues.isEmpty()) {
+                size_t provided = mPvalues[mNextPidx++];
+                printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount);
+                if (provided < buffer->frameCount) {
+                    buffer->frameCount = provided;
+                }
+                if (mNextPidx >= mPvalues.size()) {
+                    mNextPidx = 0;
+                }
+            }
+            if (gVerbose) {
+                printf("getNextBuffer() requested %zu frames out of %zu frames available,"
+                        " and returned %zu frames\n",
+                        requestedFrames, (size_t) (mNumFrames - mNextFrame), buffer->frameCount);
+            }
+            mUnrel = buffer->frameCount;
+            if (buffer->frameCount > 0) {
+                buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
+                return NO_ERROR;
+            } else {
+                buffer->raw = NULL;
+                return NOT_ENOUGH_DATA;
+            }
         }
         virtual void releaseBuffer(Buffer* buffer) {
+            if (buffer->frameCount > mUnrel) {
+                fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available "
+                        "to release\n", buffer->frameCount, mUnrel);
+                mNextFrame += mUnrel;
+                mUnrel = 0;
+            } else {
+                if (gVerbose) {
+                    printf("releaseBuffer() released %zu frames out of %zu frames available "
+                            "to release\n", buffer->frameCount, mUnrel);
+                }
+                mNextFrame += buffer->frameCount;
+                mUnrel -= buffer->frameCount;
+            }
+            buffer->frameCount = 0;
+            buffer->raw = NULL;
         }
-    } provider(input_vaddr, input_size, channels);
+        void reset() {
+            mNextFrame = 0;
+        }
+    } provider(input_vaddr, input_frames, input_framesize, Pvalues);
 
-    size_t input_frames = input_size / (channels * sizeof(int16_t));
-    size_t output_size = 2 * 4 * ((int64_t) input_frames * output_freq) / input_freq;
-    output_size &= ~7; // always stereo, 32-bits
+    if (gVerbose) {
+        printf("%zu input frames\n", input_frames);
+    }
 
-    void* output_vaddr = malloc(output_size);
+    int bit_depth = useFloat ? 32 : 16;
+    int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples
+    size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t));
+    size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq;
+    size_t output_size = output_frames * output_framesize;
 
-    if (profiling) {
-        AudioResampler* resampler = AudioResampler::create(16, channels,
-                output_freq, quality);
-
-        size_t out_frames = output_size/8;
-        resampler->setSampleRate(input_freq);
-        resampler->setVolume(0x1000, 0x1000);
-
-        memset(output_vaddr, 0, output_size);
+    if (profileFilter) {
+        // Check how fast sample rate changes are that require filter changes.
+        // The delta sample rate changes must indicate a downsampling ratio,
+        // and must be larger than 10% changes.
+        //
+        // On fast devices, filters should be generated between 0.1ms - 1ms.
+        // (single threaded).
+        AudioResampler* resampler = AudioResampler::create(bit_depth, channels,
+                8000, quality);
+        int looplimit = 100;
         timespec start, end;
         clock_gettime(CLOCK_MONOTONIC, &start);
-        resampler->resample((int*) output_vaddr, out_frames, &provider);
-        resampler->resample((int*) output_vaddr, out_frames, &provider);
-        resampler->resample((int*) output_vaddr, out_frames, &provider);
-        resampler->resample((int*) output_vaddr, out_frames, &provider);
+        for (int i = 0; i < looplimit; ++i) {
+            resampler->setSampleRate(9000);
+            resampler->setSampleRate(12000);
+            resampler->setSampleRate(20000);
+            resampler->setSampleRate(30000);
+        }
         clock_gettime(CLOCK_MONOTONIC, &end);
         int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
         int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
-        int64_t time = (end_ns - start_ns)/4;
-        printf("%f Mspl/s\n", out_frames/(time/1e9)/1e6);
+        int64_t time = end_ns - start_ns;
+        printf("%.2f sample rate changes with filter calculation/sec\n",
+                looplimit * 4 / (time / 1e9));
 
+        // Check how fast sample rate changes are without filter changes.
+        // This should be very fast, probably 0.1us - 1us per sample rate
+        // change.
+        resampler->setSampleRate(1000);
+        looplimit = 1000;
+        clock_gettime(CLOCK_MONOTONIC, &start);
+        for (int i = 0; i < looplimit; ++i) {
+            resampler->setSampleRate(1000+i);
+        }
+        clock_gettime(CLOCK_MONOTONIC, &end);
+        start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+        end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+        time = end_ns - start_ns;
+        printf("%.2f sample rate changes without filter calculation/sec\n",
+                looplimit / (time / 1e9));
+        resampler->reset();
         delete resampler;
     }
 
-    AudioResampler* resampler = AudioResampler::create(16, channels,
+    void* output_vaddr = malloc(output_size);
+    AudioResampler* resampler = AudioResampler::create(bit_depth, channels,
             output_freq, quality);
-    size_t out_frames = output_size/8;
+
+
+    /* set volume precision to 12 bits, so the volume scale is 1<<12.
+     * The output int32_t is represented as Q4.27, with 4 bits of guard
+     * followed by the int16_t Q.15 portion, and then 12 trailing bits of
+     * additional precision.
+     *
+     * Generally 0 < volumePrecision <= 14 (due to the limits of
+     * int16_t values for Volume). volumePrecision cannot be 0 due
+     * to rounding and shifts.
+     */
+    const int volumePrecision = 12; // in bits
+
     resampler->setSampleRate(input_freq);
-    resampler->setVolume(0x1000, 0x1000);
+    resampler->setVolume(1 << volumePrecision, 1 << volumePrecision);
+
+    if (profileResample) {
+        /*
+         * For profiling on mobile devices, upon experimentation
+         * it is better to run a few trials with a shorter loop limit,
+         * and take the minimum time.
+         *
+         * Long tests can cause CPU temperature to build up and thermal throttling
+         * to reduce CPU frequency.
+         *
+         * For frequency checks (index=0, or 1, etc.):
+         * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq"
+         *
+         * For temperature checks (index=0, or 1, etc.):
+         * "cat /sys/class/thermal/thermal_zone${index}/temp"
+         *
+         * Another way to avoid thermal throttling is to fix the CPU frequency
+         * at a lower level which prevents excessive temperatures.
+         */
+        const int trials = 4;
+        const int looplimit = 4;
+        timespec start, end;
+        int64_t time = 0;
+
+        for (int n = 0; n < trials; ++n) {
+            clock_gettime(CLOCK_MONOTONIC, &start);
+            for (int i = 0; i < looplimit; ++i) {
+                resampler->resample((int*) output_vaddr, output_frames, &provider);
+                provider.reset(); //  during benchmarking reset only the provider
+            }
+            clock_gettime(CLOCK_MONOTONIC, &end);
+            int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+            int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+            int64_t diff_ns = end_ns - start_ns;
+            if (n == 0 || diff_ns < time) {
+                time = diff_ns;   // save the best out of our trials.
+            }
+        }
+        // Mfrms/s is "Millions of output frames per second".
+        printf("quality: %d  channels: %d  msec: %" PRId64 "  Mfrms/s: %.2lf\n",
+                quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6);
+        resampler->reset();
+    }
 
     memset(output_vaddr, 0, output_size);
-    resampler->resample((int*) output_vaddr, out_frames, &provider);
+    if (gVerbose) {
+        printf("resample() %zu output frames\n", output_frames);
+    }
+    if (Ovalues.isEmpty()) {
+        Ovalues.push(output_frames);
+    }
+    for (size_t i = 0, j = 0; i < output_frames; ) {
+        size_t thisFrames = Ovalues[j++];
+        if (j >= Ovalues.size()) {
+            j = 0;
+        }
+        if (thisFrames == 0 || thisFrames > output_frames - i) {
+            thisFrames = output_frames - i;
+        }
+        resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider);
+        i += thisFrames;
+    }
+    if (gVerbose) {
+        printf("resample() complete\n");
+    }
+    resampler->reset();
+    if (gVerbose) {
+        printf("reset() complete\n");
+    }
+    delete resampler;
+    resampler = NULL;
 
-    // down-mix (we just truncate and keep the left channel)
+    // For float processing, convert output format from float to Q4.27,
+    // which is then converted to int16_t for final storage.
+    if (useFloat) {
+        memcpy_to_q4_27_from_float(reinterpret_cast<int32_t*>(output_vaddr),
+                reinterpret_cast<float*>(output_vaddr), output_frames * output_channels);
+    }
+
+    // mono takes left channel only (out of stereo output pair)
+    // stereo and multichannel preserve all channels.
     int32_t* out = (int32_t*) output_vaddr;
-    int16_t* convert = (int16_t*) malloc(out_frames * channels * sizeof(int16_t));
-    for (size_t i = 0; i < out_frames; i++) {
-        for (int j=0 ; j<channels ; j++) {
-            int32_t s = out[i * 2 + j] >> 12;
-            if (s > 32767)       s =  32767;
-            else if (s < -32768) s = -32768;
+    int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t));
+
+    // round to half towards zero and saturate at int16 (non-dithered)
+    const int roundVal = (1<<(volumePrecision-1)) - 1; // volumePrecision > 0
+
+    for (size_t i = 0; i < output_frames; i++) {
+        for (int j = 0; j < channels; j++) {
+            int32_t s = out[i * output_channels + j] + roundVal; // add offset here
+            if (s < 0) {
+                s = (s + 1) >> volumePrecision; // round to 0
+                if (s < -32768) {
+                    s = -32768;
+                }
+            } else {
+                s = s >> volumePrecision;
+                if (s > 32767) {
+                    s = 32767;
+                }
+            }
             convert[i * channels + j] = int16_t(s);
         }
     }
 
     // write output to disk
-    int output_fd = open(file_out, O_WRONLY | O_CREAT | O_TRUNC,
-            S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-    if (output_fd < 0) {
-        fprintf(stderr, "open: %s\n", strerror(errno));
-        return -1;
+    SF_INFO info;
+    info.frames = 0;
+    info.samplerate = output_freq;
+    info.channels = channels;
+    info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
+    SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info);
+    if (sf == NULL) {
+        perror(file_out);
+        return EXIT_FAILURE;
     }
+    (void) sf_writef_short(sf, convert, output_frames);
+    sf_close(sf);
 
-    if (writeHeader) {
-        HeaderWav wav(out_frames * channels * sizeof(int16_t), channels, output_freq, 16);
-        write(output_fd, &wav, sizeof(wav));
-    }
-
-    write(output_fd, convert, out_frames * channels * sizeof(int16_t));
-    close(output_fd);
-
-    return 0;
+    return EXIT_SUCCESS;
 }
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
new file mode 100644
index 0000000..f270bfc
--- /dev/null
+++ b/services/audiopolicy/Android.mk
@@ -0,0 +1,44 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+    AudioPolicyService.cpp
+
+USE_LEGACY_AUDIO_POLICY = 1
+ifeq ($(USE_LEGACY_AUDIO_POLICY), 1)
+LOCAL_SRC_FILES += \
+    AudioPolicyInterfaceImplLegacy.cpp \
+    AudioPolicyClientImplLegacy.cpp
+
+    LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY
+else
+LOCAL_SRC_FILES += \
+    AudioPolicyInterfaceImpl.cpp \
+    AudioPolicyClientImpl.cpp \
+    AudioPolicyManager.cpp
+endif
+
+LOCAL_C_INCLUDES := \
+    $(TOPDIR)frameworks/av/services/audioflinger \
+    $(call include-path-for, audio-effects) \
+    $(call include-path-for, audio-utils)
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libutils \
+    liblog \
+    libbinder \
+    libmedia \
+    libhardware \
+    libhardware_legacy
+
+LOCAL_STATIC_LIBRARIES := \
+    libmedia_helper \
+    libserviceutility
+
+LOCAL_MODULE:= libaudiopolicy
+
+LOCAL_CFLAGS += -fvisibility=hidden
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
new file mode 100644
index 0000000..44c47c3
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyClientImpl"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include "AudioPolicyService.h"
+
+namespace android {
+
+/* implementation of the client interface from the policy manager */
+
+audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->loadHwModule(name);
+}
+
+audio_io_handle_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
+                               audio_devices_t *pDevices,
+                               uint32_t *pSamplingRate,
+                               audio_format_t *pFormat,
+                               audio_channel_mask_t *pChannelMask,
+                               uint32_t *pLatencyMs,
+                               audio_output_flags_t flags,
+                               const audio_offload_info_t *offloadInfo)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+    return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+                          pLatencyMs, flags, offloadInfo);
+}
+
+audio_io_handle_t AudioPolicyService::AudioPolicyClient::openDuplicateOutput(
+                                                                audio_io_handle_t output1,
+                                                                audio_io_handle_t output2)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+    return af->openDuplicateOutput(output1, output2);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::closeOutput(audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->closeOutput(output);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::suspendOutput(audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return PERMISSION_DENIED;
+    }
+
+    return af->suspendOutput(output);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::restoreOutput(audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return PERMISSION_DENIED;
+    }
+
+    return af->restoreOutput(output);
+}
+
+audio_io_handle_t AudioPolicyService::AudioPolicyClient::openInput(audio_module_handle_t module,
+                              audio_devices_t *pDevices,
+                              uint32_t *pSamplingRate,
+                              audio_format_t *pFormat,
+                              audio_channel_mask_t *pChannelMask)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->closeInput(input);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::setStreamVolume(audio_stream_type_t stream,
+                     float volume, audio_io_handle_t output,
+                     int delay_ms)
+{
+    return mAudioPolicyService->setStreamVolume(stream, volume, output,
+                                               delay_ms);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::invalidateStream(audio_stream_type_t stream)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->invalidateStream(stream);
+}
+
+void AudioPolicyService::AudioPolicyClient::setParameters(audio_io_handle_t io_handle,
+                   const String8& keyValuePairs,
+                   int delay_ms)
+{
+    mAudioPolicyService->setParameters(io_handle, keyValuePairs.string(), delay_ms);
+}
+
+String8 AudioPolicyService::AudioPolicyClient::getParameters(audio_io_handle_t io_handle,
+                      const String8& keys)
+{
+    String8 result = AudioSystem::getParameters(io_handle, keys);
+    return result;
+}
+
+status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
+              audio_stream_type_t stream)
+{
+    return mAudioPolicyService->startTone(tone, stream);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::stopTone()
+{
+    return mAudioPolicyService->stopTone();
+}
+
+status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
+{
+    return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::moveEffects(int session,
+                        audio_io_handle_t src_output,
+                        audio_io_handle_t dst_output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->moveEffects(session, src_output, dst_output);
+}
+
+
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
new file mode 100644
index 0000000..53f3e2d
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyService"
+//#define LOG_NDEBUG 0
+
+#include "Configuration.h"
+#undef __STRICT_ANSI__
+#define __STDINT_LIMITS
+#define __STDC_LIMIT_MACROS
+#include <stdint.h>
+
+#include <sys/time.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+#include <cutils/properties.h>
+#include <binder/IPCThreadState.h>
+#include <utils/String16.h>
+#include <utils/threads.h>
+#include "AudioPolicyService.h"
+#include "ServiceUtilities.h"
+#include <hardware_legacy/power.h>
+#include <media/AudioEffect.h>
+#include <media/EffectsFactoryApi.h>
+//#include <media/IAudioFlinger.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+#include <audio_effects/audio_effects_conf.h>
+#include <media/AudioParameter.h>
+
+
+namespace android {
+
+/* implementation of the interface to the policy manager */
+extern "C" {
+
+audio_module_handle_t aps_load_hw_module(void *service __unused,
+                                             const char *name)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->loadHwModule(name);
+}
+
+// deprecated: replaced by aps_open_output_on_module()
+audio_io_handle_t aps_open_output(void *service __unused,
+                                         audio_devices_t *pDevices,
+                                         uint32_t *pSamplingRate,
+                                         audio_format_t *pFormat,
+                                         audio_channel_mask_t *pChannelMask,
+                                         uint32_t *pLatencyMs,
+                                         audio_output_flags_t flags)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+                          pLatencyMs, flags);
+}
+
+audio_io_handle_t aps_open_output_on_module(void *service __unused,
+                                                   audio_module_handle_t module,
+                                                   audio_devices_t *pDevices,
+                                                   uint32_t *pSamplingRate,
+                                                   audio_format_t *pFormat,
+                                                   audio_channel_mask_t *pChannelMask,
+                                                   uint32_t *pLatencyMs,
+                                                   audio_output_flags_t flags,
+                                                   const audio_offload_info_t *offloadInfo)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+    return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+                          pLatencyMs, flags, offloadInfo);
+}
+
+audio_io_handle_t aps_open_dup_output(void *service __unused,
+                                                 audio_io_handle_t output1,
+                                                 audio_io_handle_t output2)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+    return af->openDuplicateOutput(output1, output2);
+}
+
+int aps_close_output(void *service __unused, audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->closeOutput(output);
+}
+
+int aps_suspend_output(void *service __unused, audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return PERMISSION_DENIED;
+    }
+
+    return af->suspendOutput(output);
+}
+
+int aps_restore_output(void *service __unused, audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return PERMISSION_DENIED;
+    }
+
+    return af->restoreOutput(output);
+}
+
+// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
+audio_io_handle_t aps_open_input(void *service __unused,
+                                        audio_devices_t *pDevices,
+                                        uint32_t *pSamplingRate,
+                                        audio_format_t *pFormat,
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_in_acoustics_t acoustics __unused)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+}
+
+audio_io_handle_t aps_open_input_on_module(void *service __unused,
+                                                  audio_module_handle_t module,
+                                                  audio_devices_t *pDevices,
+                                                  uint32_t *pSamplingRate,
+                                                  audio_format_t *pFormat,
+                                                  audio_channel_mask_t *pChannelMask)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+}
+
+int aps_close_input(void *service __unused, audio_io_handle_t input)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->closeInput(input);
+}
+
+int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->invalidateStream(stream);
+}
+
+int aps_move_effects(void *service __unused, int session,
+                                audio_io_handle_t src_output,
+                                audio_io_handle_t dst_output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->moveEffects(session, src_output, dst_output);
+}
+
+char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
+                                     const char *keys)
+{
+    String8 result = AudioSystem::getParameters(io_handle, String8(keys));
+    return strdup(result.string());
+}
+
+void aps_set_parameters(void *service, audio_io_handle_t io_handle,
+                                   const char *kv_pairs, int delay_ms)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
+}
+
+int aps_set_stream_volume(void *service, audio_stream_type_t stream,
+                                     float volume, audio_io_handle_t output,
+                                     int delay_ms)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->setStreamVolume(stream, volume, output,
+                                               delay_ms);
+}
+
+int aps_start_tone(void *service, audio_policy_tone_t tone,
+                              audio_stream_type_t stream)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->startTone(tone, stream);
+}
+
+int aps_stop_tone(void *service)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->stopTone();
+}
+
+int aps_set_voice_volume(void *service, float volume, int delay_ms)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->setVoiceVolume(volume, delay_ms);
+}
+
+}; // extern "C"
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
new file mode 100644
index 0000000..66260e3
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPOLICY_INTERFACE_H
+#define ANDROID_AUDIOPOLICY_INTERFACE_H
+
+#include <media/AudioSystem.h>
+#include <utils/String8.h>
+
+#include <hardware/audio_policy.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
+// between the platform specific audio policy manager and Android generic audio policy manager.
+// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
+// This implementation makes use of the AudioPolicyClientInterface to control the activity and
+// configuration of audio input and output streams.
+//
+// The platform specific audio policy manager is in charge of the audio routing and volume control
+// policies for a given platform.
+// The main roles of this module are:
+//   - keep track of current system state (removable device connections, phone state, user requests...).
+//   System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
+//   - process getOutput() queries received when AudioTrack objects are created: Those queries
+//   return a handle to an output that has been selected, configured and opened by the audio policy manager and that
+//   must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
+//   When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
+//   to close or reconfigure the output depending on other streams using this output and current system state.
+//   - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
+//   - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
+//   applicable to each output as a function of platform specific settings and current output route (destination device). It
+//   also makes sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
+//
+// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so)
+// and is linked with libaudioflinger.so
+
+
+//    Audio Policy Manager Interface
+class AudioPolicyInterface
+{
+
+public:
+    virtual ~AudioPolicyInterface() {}
+    //
+    // configuration functions
+    //
+
+    // indicate a change in device connection status
+    virtual status_t setDeviceConnectionState(audio_devices_t device,
+                                              audio_policy_dev_state_t state,
+                                          const char *device_address) = 0;
+    // retrieve a device connection status
+    virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+                                                                          const char *device_address) = 0;
+    // indicate a change in phone state. Valid phones states are defined by audio_mode_t
+    virtual void setPhoneState(audio_mode_t state) = 0;
+    // force using a specific device category for the specified usage
+    virtual void setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0;
+    // retrieve current device category forced for a given usage
+    virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
+    // set a system property (e.g. camera sound always audible)
+    virtual void setSystemProperty(const char* property, const char* value) = 0;
+    // check proper initialization
+    virtual status_t initCheck() = 0;
+
+    //
+    // Audio routing query functions
+    //
+
+    // request an output appropriate for playback of the supplied stream type and parameters
+    virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
+                                        uint32_t samplingRate,
+                                        audio_format_t format,
+                                        audio_channel_mask_t channelMask,
+                                        audio_output_flags_t flags,
+                                        const audio_offload_info_t *offloadInfo) = 0;
+    // indicates to the audio policy manager that the output starts being used by corresponding stream.
+    virtual status_t startOutput(audio_io_handle_t output,
+                                 audio_stream_type_t stream,
+                                 int session = 0) = 0;
+    // indicates to the audio policy manager that the output stops being used by corresponding stream.
+    virtual status_t stopOutput(audio_io_handle_t output,
+                                audio_stream_type_t stream,
+                                int session = 0) = 0;
+    // releases the output.
+    virtual void releaseOutput(audio_io_handle_t output) = 0;
+
+    // request an input appropriate for record from the supplied device with supplied parameters.
+    virtual audio_io_handle_t getInput(audio_source_t inputSource,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_in_acoustics_t acoustics) = 0;
+    // indicates to the audio policy manager that the input starts being used.
+    virtual status_t startInput(audio_io_handle_t input) = 0;
+    // indicates to the audio policy manager that the input stops being used.
+    virtual status_t stopInput(audio_io_handle_t input) = 0;
+    // releases the input.
+    virtual void releaseInput(audio_io_handle_t input) = 0;
+
+    //
+    // volume control functions
+    //
+
+    // initialises stream volume conversion parameters by specifying volume index range.
+    virtual void initStreamVolume(audio_stream_type_t stream,
+                                      int indexMin,
+                                      int indexMax) = 0;
+
+    // sets the new stream volume at a level corresponding to the supplied index for the
+    // supplied device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT means
+    // setting volume for all devices
+    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                          int index,
+                                          audio_devices_t device) = 0;
+
+    // retrieve current volume index for the specified stream and the
+    // specified device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT means
+    // querying the volume of the active device.
+    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                          int *index,
+                                          audio_devices_t device) = 0;
+
+    // return the strategy corresponding to a given stream type
+    virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
+
+    // return the enabled output devices for the given stream type
+    virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
+
+    // Audio effect management
+    virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc) = 0;
+    virtual status_t registerEffect(const effect_descriptor_t *desc,
+                                    audio_io_handle_t io,
+                                    uint32_t strategy,
+                                    int session,
+                                    int id) = 0;
+    virtual status_t unregisterEffect(int id) = 0;
+    virtual status_t setEffectEnabled(int id, bool enabled) = 0;
+
+    virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0;
+    virtual bool isStreamActiveRemotely(audio_stream_type_t stream,
+                                        uint32_t inPastMs = 0) const = 0;
+    virtual bool isSourceActive(audio_source_t source) const = 0;
+
+    //dump state
+    virtual status_t    dump(int fd) = 0;
+
+    virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo) = 0;
+};
+
+
+// Audio Policy client Interface
+class AudioPolicyClientInterface
+{
+public:
+    virtual ~AudioPolicyClientInterface() {}
+
+    //
+    // Audio HW module functions
+    //
+
+    // loads a HW module.
+    virtual audio_module_handle_t loadHwModule(const char *name) = 0;
+
+    //
+    // Audio output Control functions
+    //
+
+    // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
+    // in case the audio policy manager has no specific requirements for the output being opened.
+    // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
+    // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+    virtual audio_io_handle_t openOutput(audio_module_handle_t module,
+                                         audio_devices_t *pDevices,
+                                         uint32_t *pSamplingRate,
+                                         audio_format_t *pFormat,
+                                         audio_channel_mask_t *pChannelMask,
+                                         uint32_t *pLatencyMs,
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
+    // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
+    // a special mixer thread in the AudioFlinger.
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
+    // closes the output stream
+    virtual status_t closeOutput(audio_io_handle_t output) = 0;
+    // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
+    // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+    virtual status_t suspendOutput(audio_io_handle_t output) = 0;
+    // restores a suspended output.
+    virtual status_t restoreOutput(audio_io_handle_t output) = 0;
+
+    //
+    // Audio input Control functions
+    //
+
+    // opens an audio input
+    virtual audio_io_handle_t openInput(audio_module_handle_t module,
+                                        audio_devices_t *pDevices,
+                                        uint32_t *pSamplingRate,
+                                        audio_format_t *pFormat,
+                                        audio_channel_mask_t *pChannelMask) = 0;
+    // closes an audio input
+    virtual status_t closeInput(audio_io_handle_t input) = 0;
+    //
+    // misc control functions
+    //
+
+    // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+    // for each output (destination device) it is attached to.
+    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0;
+
+    // invalidate a stream type, causing a reroute to an unspecified new output
+    virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
+
+    // sends proprietary information directly from the audio policy manager to the audio hardware interface.
+    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0;
+    // receives proprietary information directly from the audio hardware interface for the audio policy manager.
+    virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
+
+    // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
+    // over a telephony device during a phone call.
+    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
+    virtual status_t stopTone() = 0;
+
+    // set down link audio volume.
+    virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
+
+    // move effect to the specified output
+    virtual status_t moveEffects(int session,
+                                     audio_io_handle_t srcOutput,
+                                     audio_io_handle_t dstOutput) = 0;
+
+};
+
+extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
+extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface);
+
+
+}; // namespace android
+
+#endif // ANDROID_AUDIOPOLICY_INTERFACE_H
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
new file mode 100644
index 0000000..c57c4fa
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyIntefaceImpl"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include "AudioPolicyService.h"
+#include "ServiceUtilities.h"
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+
+status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
+                                                  audio_policy_dev_state_t state,
+                                                  const char *device_address)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
+        return BAD_VALUE;
+    }
+    if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
+            state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
+        return BAD_VALUE;
+    }
+
+    ALOGV("setDeviceConnectionState()");
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->setDeviceConnectionState(device,
+                                                      state, device_address);
+}
+
+audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
+                                                              audio_devices_t device,
+                                                              const char *device_address)
+{
+    if (mAudioPolicyManager == NULL) {
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
+    return mAudioPolicyManager->getDeviceConnectionState(device,
+                                                      device_address);
+}
+
+status_t AudioPolicyService::setPhoneState(audio_mode_t state)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(state) >= AUDIO_MODE_CNT) {
+        return BAD_VALUE;
+    }
+
+    ALOGV("setPhoneState()");
+
+    // TODO: check if it is more appropriate to do it in platform specific policy manager
+    AudioSystem::setMode(state);
+
+    Mutex::Autolock _l(mLock);
+    mAudioPolicyManager->setPhoneState(state);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
+                                         audio_policy_forced_cfg_t config)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return BAD_VALUE;
+    }
+    if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
+        return BAD_VALUE;
+    }
+    ALOGV("setForceUse()");
+    Mutex::Autolock _l(mLock);
+    mAudioPolicyManager->setForceUse(usage, config);
+    return NO_ERROR;
+}
+
+audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
+{
+    if (mAudioPolicyManager == NULL) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    return mAudioPolicyManager->getForceUse(usage);
+}
+
+audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    ALOGV("getOutput()");
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getOutput(stream, samplingRate,
+                                    format, channelMask, flags, offloadInfo);
+}
+
+status_t AudioPolicyService::startOutput(audio_io_handle_t output,
+                                         audio_stream_type_t stream,
+                                         int session)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    ALOGV("startOutput()");
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->startOutput(output, stream, session);
+}
+
+status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
+                                        audio_stream_type_t stream,
+                                        int session)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    ALOGV("stopOutput()");
+    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    return NO_ERROR;
+}
+
+status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
+                                      audio_stream_type_t stream,
+                                      int session)
+{
+    ALOGV("doStopOutput from tid %d", gettid());
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->stopOutput(output, stream, session);
+}
+
+void AudioPolicyService::releaseOutput(audio_io_handle_t output)
+{
+    if (mAudioPolicyManager == NULL) {
+        return;
+    }
+    ALOGV("releaseOutput()");
+    mOutputCommandThread->releaseOutputCommand(output);
+}
+
+void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
+{
+    ALOGV("doReleaseOutput from tid %d", gettid());
+    Mutex::Autolock _l(mLock);
+    mAudioPolicyManager->releaseOutput(output);
+}
+
+audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession)
+{
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    // already checked by client, but double-check in case the client wrapper is bypassed
+    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) {
+        return 0;
+    }
+
+    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
+        return 0;
+    }
+
+    Mutex::Autolock _l(mLock);
+    // the audio_in_acoustics_t parameter is ignored by get_input()
+    audio_io_handle_t input = mAudioPolicyManager->getInput(inputSource, samplingRate,
+                                                   format, channelMask, (audio_in_acoustics_t) 0);
+
+    if (input == 0) {
+        return input;
+    }
+    // create audio pre processors according to input source
+    audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ?
+                                    AUDIO_SOURCE_VOICE_RECOGNITION : inputSource;
+
+    ssize_t index = mInputSources.indexOfKey(aliasSource);
+    if (index < 0) {
+        return input;
+    }
+    ssize_t idx = mInputs.indexOfKey(input);
+    InputDesc *inputDesc;
+    if (idx < 0) {
+        inputDesc = new InputDesc(audioSession);
+        mInputs.add(input, inputDesc);
+    } else {
+        inputDesc = mInputs.valueAt(idx);
+    }
+
+    Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
+    for (size_t i = 0; i < effects.size(); i++) {
+        EffectDesc *effect = effects[i];
+        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
+        status_t status = fx->initCheck();
+        if (status != NO_ERROR && status != ALREADY_EXISTS) {
+            ALOGW("Failed to create Fx %s on input %d", effect->mName, input);
+            // fx goes out of scope and strong ref on AudioEffect is released
+            continue;
+        }
+        for (size_t j = 0; j < effect->mParams.size(); j++) {
+            fx->setParameter(effect->mParams[j]);
+        }
+        inputDesc->mEffects.add(fx);
+    }
+    setPreProcessorEnabled(inputDesc, true);
+    return input;
+}
+
+status_t AudioPolicyService::startInput(audio_io_handle_t input)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+
+    return mAudioPolicyManager->startInput(input);
+}
+
+status_t AudioPolicyService::stopInput(audio_io_handle_t input)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+
+    return mAudioPolicyManager->stopInput(input);
+}
+
+void AudioPolicyService::releaseInput(audio_io_handle_t input)
+{
+    if (mAudioPolicyManager == NULL) {
+        return;
+    }
+    Mutex::Autolock _l(mLock);
+    mAudioPolicyManager->releaseInput(input);
+
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        return;
+    }
+    InputDesc *inputDesc = mInputs.valueAt(index);
+    setPreProcessorEnabled(inputDesc, false);
+    delete inputDesc;
+    mInputs.removeItemsAt(index);
+}
+
+status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
+                                            int indexMin,
+                                            int indexMax)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int index,
+                                                  audio_devices_t device)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->setStreamVolumeIndex(stream,
+                                                    index,
+                                                    device);
+}
+
+status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int *index,
+                                                  audio_devices_t device)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getStreamVolumeIndex(stream,
+                                                    index,
+                                                    device);
+}
+
+uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
+{
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    return mAudioPolicyManager->getStrategyForStream(stream);
+}
+
+//audio policy: use audio_device_t appropriately
+
+audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
+{
+    if (mAudioPolicyManager == NULL) {
+        return (audio_devices_t)0;
+    }
+    return mAudioPolicyManager->getDevicesForStream(stream);
+}
+
+audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
+{
+    // FIXME change return type to status_t, and return NO_INIT here
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getOutputForEffect(desc);
+}
+
+status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
+                                audio_io_handle_t io,
+                                uint32_t strategy,
+                                int session,
+                                int id)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id);
+}
+
+status_t AudioPolicyService::unregisterEffect(int id)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    return mAudioPolicyManager->unregisterEffect(id);
+}
+
+status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    return mAudioPolicyManager->setEffectEnabled(id, enabled);
+}
+
+bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->isStreamActive(stream, inPastMs);
+}
+
+bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
+}
+
+bool AudioPolicyService::isSourceActive(audio_source_t source) const
+{
+    if (mAudioPolicyManager == NULL) {
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->isSourceActive(source);
+}
+
+status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+                                                       effect_descriptor_t *descriptors,
+                                                       uint32_t *count)
+{
+
+    if (mAudioPolicyManager == NULL) {
+        *count = 0;
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    status_t status = NO_ERROR;
+
+    size_t index;
+    for (index = 0; index < mInputs.size(); index++) {
+        if (mInputs.valueAt(index)->mSessionId == audioSession) {
+            break;
+        }
+    }
+    if (index == mInputs.size()) {
+        *count = 0;
+        return BAD_VALUE;
+    }
+    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+
+    for (size_t i = 0; i < effects.size(); i++) {
+        effect_descriptor_t desc = effects[i]->descriptor();
+        if (i < *count) {
+            descriptors[i] = desc;
+        }
+    }
+    if (effects.size() > *count) {
+        status = NO_MEMORY;
+    }
+    *count = effects.size();
+    return status;
+}
+
+bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
+{
+    if (mAudioPolicyManager == NULL) {
+        ALOGV("mAudioPolicyManager == NULL");
+        return false;
+    }
+
+    return mAudioPolicyManager->isOffloadSupported(info);
+}
+
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
new file mode 100644
index 0000000..bb62ab3
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyService"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include "AudioPolicyService.h"
+#include "ServiceUtilities.h"
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+
+status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
+                                                  audio_policy_dev_state_t state,
+                                                  const char *device_address)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
+        return BAD_VALUE;
+    }
+    if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
+            state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
+        return BAD_VALUE;
+    }
+
+    ALOGV("setDeviceConnectionState()");
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
+                                                      state, device_address);
+}
+
+audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
+                                                              audio_devices_t device,
+                                                              const char *device_address)
+{
+    if (mpAudioPolicy == NULL) {
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
+    return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
+                                                      device_address);
+}
+
+status_t AudioPolicyService::setPhoneState(audio_mode_t state)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(state) >= AUDIO_MODE_CNT) {
+        return BAD_VALUE;
+    }
+
+    ALOGV("setPhoneState()");
+
+    // TODO: check if it is more appropriate to do it in platform specific policy manager
+    AudioSystem::setMode(state);
+
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
+                                         audio_policy_forced_cfg_t config)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return BAD_VALUE;
+    }
+    if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
+        return BAD_VALUE;
+    }
+    ALOGV("setForceUse()");
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
+    return NO_ERROR;
+}
+
+audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
+{
+    if (mpAudioPolicy == NULL) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
+}
+
+audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    ALOGV("getOutput()");
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
+                                    format, channelMask, flags, offloadInfo);
+}
+
+status_t AudioPolicyService::startOutput(audio_io_handle_t output,
+                                         audio_stream_type_t stream,
+                                         int session)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    ALOGV("startOutput()");
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
+}
+
+status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
+                                        audio_stream_type_t stream,
+                                        int session)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    ALOGV("stopOutput()");
+    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    return NO_ERROR;
+}
+
+status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
+                                      audio_stream_type_t stream,
+                                      int session)
+{
+    ALOGV("doStopOutput from tid %d", gettid());
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
+}
+
+void AudioPolicyService::releaseOutput(audio_io_handle_t output)
+{
+    if (mpAudioPolicy == NULL) {
+        return;
+    }
+    ALOGV("releaseOutput()");
+    mOutputCommandThread->releaseOutputCommand(output);
+}
+
+void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
+{
+    ALOGV("doReleaseOutput from tid %d", gettid());
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->release_output(mpAudioPolicy, output);
+}
+
+audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession)
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    // already checked by client, but double-check in case the client wrapper is bypassed
+    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) {
+        return 0;
+    }
+
+    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
+        return 0;
+    }
+
+    Mutex::Autolock _l(mLock);
+    // the audio_in_acoustics_t parameter is ignored by get_input()
+    audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
+                                                   format, channelMask, (audio_in_acoustics_t) 0);
+
+    if (input == 0) {
+        return input;
+    }
+    // create audio pre processors according to input source
+    audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ?
+                                    AUDIO_SOURCE_VOICE_RECOGNITION : inputSource;
+
+    ssize_t index = mInputSources.indexOfKey(aliasSource);
+    if (index < 0) {
+        return input;
+    }
+    ssize_t idx = mInputs.indexOfKey(input);
+    InputDesc *inputDesc;
+    if (idx < 0) {
+        inputDesc = new InputDesc(audioSession);
+        mInputs.add(input, inputDesc);
+    } else {
+        inputDesc = mInputs.valueAt(idx);
+    }
+
+    Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
+    for (size_t i = 0; i < effects.size(); i++) {
+        EffectDesc *effect = effects[i];
+        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
+        status_t status = fx->initCheck();
+        if (status != NO_ERROR && status != ALREADY_EXISTS) {
+            ALOGW("Failed to create Fx %s on input %d", effect->mName, input);
+            // fx goes out of scope and strong ref on AudioEffect is released
+            continue;
+        }
+        for (size_t j = 0; j < effect->mParams.size(); j++) {
+            fx->setParameter(effect->mParams[j]);
+        }
+        inputDesc->mEffects.add(fx);
+    }
+    setPreProcessorEnabled(inputDesc, true);
+    return input;
+}
+
+status_t AudioPolicyService::startInput(audio_io_handle_t input)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+
+    return mpAudioPolicy->start_input(mpAudioPolicy, input);
+}
+
+status_t AudioPolicyService::stopInput(audio_io_handle_t input)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+
+    return mpAudioPolicy->stop_input(mpAudioPolicy, input);
+}
+
+void AudioPolicyService::releaseInput(audio_io_handle_t input)
+{
+    if (mpAudioPolicy == NULL) {
+        return;
+    }
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->release_input(mpAudioPolicy, input);
+
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        return;
+    }
+    InputDesc *inputDesc = mInputs.valueAt(index);
+    setPreProcessorEnabled(inputDesc, false);
+    delete inputDesc;
+    mInputs.removeItemsAt(index);
+}
+
+status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
+                                            int indexMin,
+                                            int indexMax)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int index,
+                                                  audio_devices_t device)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mpAudioPolicy->set_stream_volume_index_for_device) {
+        return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
+                                                                stream,
+                                                                index,
+                                                                device);
+    } else {
+        return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
+    }
+}
+
+status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int *index,
+                                                  audio_devices_t device)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mpAudioPolicy->get_stream_volume_index_for_device) {
+        return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
+                                                                stream,
+                                                                index,
+                                                                device);
+    } else {
+        return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
+    }
+}
+
+uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
+}
+
+//audio policy: use audio_device_t appropriately
+
+audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
+{
+    if (mpAudioPolicy == NULL) {
+        return (audio_devices_t)0;
+    }
+    return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
+}
+
+audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
+{
+    // FIXME change return type to status_t, and return NO_INIT here
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
+}
+
+status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
+                                audio_io_handle_t io,
+                                uint32_t strategy,
+                                int session,
+                                int id)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
+}
+
+status_t AudioPolicyService::unregisterEffect(int id)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
+}
+
+status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
+}
+
+bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
+}
+
+bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
+}
+
+bool AudioPolicyService::isSourceActive(audio_source_t source) const
+{
+    if (mpAudioPolicy == NULL) {
+        return false;
+    }
+    if (mpAudioPolicy->is_source_active == 0) {
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
+}
+
+status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+                                                       effect_descriptor_t *descriptors,
+                                                       uint32_t *count)
+{
+
+    if (mpAudioPolicy == NULL) {
+        *count = 0;
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    status_t status = NO_ERROR;
+
+    size_t index;
+    for (index = 0; index < mInputs.size(); index++) {
+        if (mInputs.valueAt(index)->mSessionId == audioSession) {
+            break;
+        }
+    }
+    if (index == mInputs.size()) {
+        *count = 0;
+        return BAD_VALUE;
+    }
+    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+
+    for (size_t i = 0; i < effects.size(); i++) {
+        effect_descriptor_t desc = effects[i]->descriptor();
+        if (i < *count) {
+            descriptors[i] = desc;
+        }
+    }
+    if (effects.size() > *count) {
+        status = NO_MEMORY;
+    }
+    *count = effects.size();
+    return status;
+}
+
+bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
+{
+    if (mpAudioPolicy == NULL) {
+        ALOGV("mpAudioPolicy == NULL");
+        return false;
+    }
+
+    if (mpAudioPolicy->is_offload_supported == NULL) {
+        ALOGV("HAL does not implement is_offload_supported");
+        return false;
+    }
+
+    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
+}
+
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
new file mode 100644
index 0000000..45f98d2
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -0,0 +1,4296 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyManager"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+// A device mask for all audio input devices that are considered "virtual" when evaluating
+// active inputs in getActiveInput()
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  AUDIO_DEVICE_IN_REMOTE_SUBMIX
+// A device mask for all audio output devices that are considered "remote" when evaluating
+// active output devices in isStreamActiveRemotely()
+#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+
+#include <utils/Log.h>
+#include "AudioPolicyManager.h"
+#include <hardware/audio_effect.h>
+#include <hardware/audio.h>
+#include <math.h>
+#include <hardware_legacy/audio_policy_conf.h>
+#include <cutils/properties.h>
+#include <media/AudioParameter.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+// Definitions for audio_policy.conf file parsing
+// ----------------------------------------------------------------------------
+
+struct StringToEnum {
+    const char *name;
+    uint32_t value;
+};
+
+#define STRING_TO_ENUM(string) { #string, string }
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+const StringToEnum sDeviceNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+};
+
+const StringToEnum sFlagNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+};
+
+const StringToEnum sFormatNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+    STRING_TO_ENUM(AUDIO_FORMAT_MP3),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC),
+    STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
+};
+
+const StringToEnum sOutChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+};
+
+const StringToEnum sInChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+};
+
+
+uint32_t AudioPolicyManager::stringToEnum(const struct StringToEnum *table,
+                                              size_t size,
+                                              const char *name)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (strcmp(table[i].name, name) == 0) {
+            ALOGV("stringToEnum() found %s", table[i].name);
+            return table[i].value;
+        }
+    }
+    return 0;
+}
+
+const char *AudioPolicyManager::enumToString(const struct StringToEnum *table,
+                                              size_t size,
+                                              uint32_t value)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (table[i].value == value) {
+            return table[i].name;
+        }
+    }
+    return "";
+}
+
+bool AudioPolicyManager::stringToBool(const char *value)
+{
+    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
+}
+
+
+// ----------------------------------------------------------------------------
+// AudioPolicyInterface implementation
+// ----------------------------------------------------------------------------
+
+
+status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
+                                                          audio_policy_dev_state_t state,
+                                                  const char *device_address)
+{
+    SortedVector <audio_io_handle_t> outputs;
+    String8 address = String8(device_address);
+
+    ALOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address);
+
+    // connect/disconnect only 1 device at a time
+    if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
+
+    // handle output devices
+    if (audio_is_output_device(device)) {
+        sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device,
+                                                            address,
+                                                            0);
+        ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
+
+        // save a copy of the opened output descriptors before any output is opened or closed
+        // by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
+        mPreviousOutputs = mOutputs;
+        switch (state)
+        {
+        // handle output device connection
+        case AUDIO_POLICY_DEVICE_STATE_AVAILABLE:
+            if (index >= 0) {
+                ALOGW("setDeviceConnectionState() device already connected: %x", device);
+                return INVALID_OPERATION;
+            }
+            ALOGV("setDeviceConnectionState() connecting device %x", device);
+
+            if (checkOutputsForDevice(device, state, outputs, address) != NO_ERROR) {
+                return INVALID_OPERATION;
+            }
+            ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %d outputs",
+                  outputs.size());
+            // register new device as available
+            index = mAvailableOutputDevices.add(devDesc);
+            if (index >= 0) {
+                mAvailableOutputDevices[index]->mId = nextUniqueId();
+            } else {
+                return NO_MEMORY;
+            }
+
+            break;
+        // handle output device disconnection
+        case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
+            if (index < 0) {
+                ALOGW("setDeviceConnectionState() device not connected: %x", device);
+                return INVALID_OPERATION;
+            }
+
+            ALOGV("setDeviceConnectionState() disconnecting device %x", device);
+            // remove device from available output devices
+            mAvailableOutputDevices.remove(devDesc);
+
+            checkOutputsForDevice(device, state, outputs, address);
+            // not currently handling multiple simultaneous submixes: ignoring remote submix
+            //   case and address
+            } break;
+
+        default:
+            ALOGE("setDeviceConnectionState() invalid state: %x", state);
+            return BAD_VALUE;
+        }
+
+        // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+        // output is suspended before any tracks are moved to it
+        checkA2dpSuspend();
+        checkOutputForAllStrategies();
+        // outputs must be closed after checkOutputForAllStrategies() is executed
+        if (!outputs.isEmpty()) {
+            for (size_t i = 0; i < outputs.size(); i++) {
+                AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]);
+                // close unused outputs after device disconnection or direct outputs that have been
+                // opened by checkOutputsForDevice() to query dynamic parameters
+                if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
+                        (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+                         (desc->mDirectOpenCount == 0))) {
+                    closeOutput(outputs[i]);
+                }
+            }
+            // check again after closing A2DP output to reset mA2dpSuspended if needed
+            checkA2dpSuspend();
+        }
+
+        updateDevicesAndOutputs();
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            // do not force device change on duplicated output because if device is 0, it will
+            // also force a device 0 for the two outputs it is duplicated to which may override
+            // a valid device selection on those outputs.
+            setOutputDevice(mOutputs.keyAt(i),
+                            getNewDevice(mOutputs.keyAt(i), true /*fromCache*/),
+                            !mOutputs.valueAt(i)->isDuplicated(),
+                            0);
+        }
+
+        if (device == AUDIO_DEVICE_OUT_WIRED_HEADSET) {
+            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+        } else if (device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO ||
+                   device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET ||
+                   device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT) {
+            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+        } else {
+            return NO_ERROR;
+        }
+    }
+    // handle input devices
+    if (audio_is_input_device(device)) {
+        sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device,
+                                                            address,
+                                                            0);
+
+        ssize_t index = mAvailableInputDevices.indexOf(devDesc);
+        switch (state)
+        {
+        // handle input device connection
+        case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
+            if (index >= 0) {
+                ALOGW("setDeviceConnectionState() device already connected: %d", device);
+                return INVALID_OPERATION;
+            }
+            index = mAvailableInputDevices.add(devDesc);
+            if (index >= 0) {
+                mAvailableInputDevices[index]->mId = nextUniqueId();
+            } else {
+                return NO_MEMORY;
+            }
+            }
+            break;
+
+        // handle input device disconnection
+        case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
+            if (index < 0) {
+                ALOGW("setDeviceConnectionState() device not connected: %d", device);
+                return INVALID_OPERATION;
+            }
+            mAvailableInputDevices.remove(devDesc);
+            } break;
+
+        default:
+            ALOGE("setDeviceConnectionState() invalid state: %x", state);
+            return BAD_VALUE;
+        }
+
+        audio_io_handle_t activeInput = getActiveInput();
+        if (activeInput != 0) {
+            AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput);
+            audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource);
+            if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) {
+                ALOGV("setDeviceConnectionState() changing device from %x to %x for input %d",
+                        inputDesc->mDevice, newDevice, activeInput);
+                inputDesc->mDevice = newDevice;
+                AudioParameter param = AudioParameter();
+                param.addInt(String8(AudioParameter::keyRouting), (int)newDevice);
+                mpClientInterface->setParameters(activeInput, param.toString());
+            }
+        }
+
+        return NO_ERROR;
+    }
+
+    ALOGW("setDeviceConnectionState() invalid device: %x", device);
+    return BAD_VALUE;
+}
+
+// Reports whether the given device (identified by type and address) is
+// currently connected, i.e. present in the available output or input device
+// list. Returns AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE for unknown device types.
+audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
+                                                  const char *device_address)
+{
+    // Build a descriptor matching the queried device so it can be looked up in
+    // the available-device vectors (indexOf() compares against this descriptor).
+    String8 address = String8(device_address);
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device,
+                                                        address,
+                                                        0);
+    DeviceVector *deviceVector;
+
+    if (audio_is_output_device(device)) {
+        deviceVector = &mAvailableOutputDevices;
+    } else if (audio_is_input_device(device)) {
+        deviceVector = &mAvailableInputDevices;
+    } else {
+        ALOGW("getDeviceConnectionState() invalid device type %08x", device);
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
+
+    // A device is "available" iff it is present in the corresponding vector.
+    if (deviceVector->indexOf(devDesc) >= 0) {
+        return AUDIO_POLICY_DEVICE_STATE_AVAILABLE;
+    } else {
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
+}
+
+// Applies the audio policy consequences of a phone state (audio mode) change:
+// re-evaluates device routing, swaps DTMF volume curves when entering/leaving a
+// call, mutes and delays outputs so ring/music tails are not sent to the new
+// device, and manages in-call sonification for all streams.
+void AudioPolicyManager::setPhoneState(audio_mode_t state)
+{
+    ALOGV("setPhoneState() state %d", state);
+    audio_devices_t newDevice = AUDIO_DEVICE_NONE;
+    if (state < 0 || state >= AUDIO_MODE_CNT) {
+        ALOGW("setPhoneState() invalid state %d", state);
+        return;
+    }
+
+    if (state == mPhoneState ) {
+        ALOGW("setPhoneState() setting same state %d", state);
+        return;
+    }
+
+    // if leaving call state, handle special case of active streams
+    // pertaining to sonification strategy see handleIncallSonification()
+    if (isInCall()) {
+        ALOGV("setPhoneState() in call state management: new state is %d", state);
+        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+            handleIncallSonification((audio_stream_type_t)stream, false, true);
+        }
+    }
+
+    // store previous phone state for management of sonification strategy below
+    int oldState = mPhoneState;
+    mPhoneState = state;
+    bool force = false;
+
+    // are we entering or starting a call
+    if (!isStateInCall(oldState) && isStateInCall(state)) {
+        ALOGV("  Entering call in setPhoneState()");
+        // force routing command to audio hardware when starting a call
+        // even if no device change is needed
+        force = true;
+        // while in call, DTMF tones follow the voice-call volume curves
+        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
+            mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] =
+                    sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j];
+        }
+    } else if (isStateInCall(oldState) && !isStateInCall(state)) {
+        ALOGV("  Exiting call in setPhoneState()");
+        // force routing command to audio hardware when exiting a call
+        // even if no device change is needed
+        force = true;
+        // restore the default DTMF volume curves on call exit
+        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
+            mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] =
+                    sVolumeProfiles[AUDIO_STREAM_DTMF][j];
+        }
+    } else if (isStateInCall(state) && (state != oldState)) {
+        ALOGV("  Switching between telephony and VoIP in setPhoneState()");
+        // force routing command to audio hardware when switching between telephony and VoIP
+        // even if no device change is needed
+        force = true;
+    }
+
+    // check for device and output changes triggered by new phone state
+    newDevice = getNewDevice(mPrimaryOutput, false /*fromCache*/);
+    checkA2dpSuspend();
+    checkOutputForAllStrategies();
+    updateDevicesAndOutputs();
+
+    AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
+
+    // force routing command to audio hardware when ending call
+    // even if no device change is needed
+    if (isStateInCall(oldState) && newDevice == AUDIO_DEVICE_NONE) {
+        newDevice = hwOutputDesc->device();
+    }
+
+    int delayMs = 0;
+    if (isStateInCall(state)) {
+        nsecs_t sysTime = systemTime();
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            AudioOutputDescriptor *desc = mOutputs.valueAt(i);
+            // mute media and sonification strategies and delay device switch by the largest
+            // latency of any output where either strategy is active.
+            // This avoid sending the ring tone or music tail into the earpiece or headset.
+            if ((desc->isStrategyActive(STRATEGY_MEDIA,
+                                     SONIFICATION_HEADSET_MUSIC_DELAY,
+                                     sysTime) ||
+                    desc->isStrategyActive(STRATEGY_SONIFICATION,
+                                         SONIFICATION_HEADSET_MUSIC_DELAY,
+                                         sysTime)) &&
+                    (delayMs < (int)desc->mLatency*2)) {
+                delayMs = desc->mLatency*2;
+            }
+            // mute now, then schedule an unmute after MUTE_TIME_MS on the
+            // device the strategy will use after the state change
+            setStrategyMute(STRATEGY_MEDIA, true, mOutputs.keyAt(i));
+            setStrategyMute(STRATEGY_MEDIA, false, mOutputs.keyAt(i), MUTE_TIME_MS,
+                getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
+            setStrategyMute(STRATEGY_SONIFICATION, true, mOutputs.keyAt(i));
+            setStrategyMute(STRATEGY_SONIFICATION, false, mOutputs.keyAt(i), MUTE_TIME_MS,
+                getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
+        }
+    }
+
+    // change routing is necessary
+    setOutputDevice(mPrimaryOutput, newDevice, force, delayMs);
+
+    // if entering in call state, handle special case of active streams
+    // pertaining to sonification strategy see handleIncallSonification()
+    if (isStateInCall(state)) {
+        ALOGV("setPhoneState() in call state management: new state is %d", state);
+        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+            handleIncallSonification((audio_stream_type_t)stream, true, true);
+        }
+    }
+
+    // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
+    if (state == AUDIO_MODE_RINGTONE &&
+        isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
+        mLimitRingtoneVolume = true;
+    } else {
+        mLimitRingtoneVolume = false;
+    }
+}
+
+// Records a forced device configuration for the given usage after validating
+// it, then re-evaluates routing (and, for some usages, volumes) on all outputs
+// and on the active input.
+void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
+                                         audio_policy_forced_cfg_t config)
+{
+    ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mPhoneState);
+
+    bool forceVolumeReeval = false;
+    switch(usage) {
+    case AUDIO_POLICY_FORCE_FOR_COMMUNICATION:
+        if (config != AUDIO_POLICY_FORCE_SPEAKER && config != AUDIO_POLICY_FORCE_BT_SCO &&
+            config != AUDIO_POLICY_FORCE_NONE) {
+            ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
+            return;
+        }
+        forceVolumeReeval = true;
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_MEDIA:
+        if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
+            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
+            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_NO_BT_A2DP) {
+            ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
+            return;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_RECORD:
+        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_NONE) {
+            ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
+            return;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_DOCK:
+        if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
+            config != AUDIO_POLICY_FORCE_BT_DESK_DOCK &&
+            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
+            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
+            ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
+        }
+        // NOTE(review): unlike COMMUNICATION/MEDIA/RECORD above, an invalid
+        // config here is only logged and is still applied below — confirm this
+        // asymmetry is intentional.
+        forceVolumeReeval = true;
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_SYSTEM:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+            ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
+        }
+        // NOTE(review): same as FOR_DOCK — invalid config is warned about but
+        // still stored; confirm intended.
+        forceVolumeReeval = true;
+        mForceUse[usage] = config;
+        break;
+    default:
+        ALOGW("setForceUse() invalid usage %d", usage);
+        // fall through to the routing re-evaluation below without updating mForceUse
+        break;
+    }
+
+    // check for device and output changes triggered by new force usage
+    checkA2dpSuspend();
+    checkOutputForAllStrategies();
+    updateDevicesAndOutputs();
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        audio_io_handle_t output = mOutputs.keyAt(i);
+        audio_devices_t newDevice = getNewDevice(output, true /*fromCache*/);
+        setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
+        if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
+            applyStreamVolumes(output, newDevice, 0, true);
+        }
+    }
+
+    // reroute the active input if the forced config changes its best device
+    audio_io_handle_t activeInput = getActiveInput();
+    if (activeInput != 0) {
+        AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput);
+        audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource);
+        if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) {
+            ALOGV("setForceUse() changing device from %x to %x for input %d",
+                    inputDesc->mDevice, newDevice, activeInput);
+            inputDesc->mDevice = newDevice;
+            AudioParameter param = AudioParameter();
+            param.addInt(String8(AudioParameter::keyRouting), (int)newDevice);
+            mpClientInterface->setParameters(activeInput, param.toString());
+        }
+    }
+
+}
+
+// Returns the forced device configuration currently set for the given usage.
+// Validates the usage index defensively: mForceUse is a fixed-size array
+// indexed by audio_policy_force_use_t, so an out-of-range value would read
+// past the end of the array.
+audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
+{
+    if ((int)usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        ALOGW("getForceUse() invalid usage %d", usage);
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    return mForceUse[usage];
+}
+
+// Hook invoked when a system property relevant to audio policy changes.
+// This implementation only logs the update and takes no further action.
+void AudioPolicyManager::setSystemProperty(const char* property, const char* value)
+{
+    ALOGV("setSystemProperty() property %s, value %s", property, value);
+}
+
+// Find a direct output profile compatible with the parameters passed, even if the input flags do
+// not explicitly request a direct output.
+// Returns NULL when no opened HW module exposes a matching profile with at
+// least one of its supported devices currently available.
+AudioPolicyManager::IOProfile *AudioPolicyManager::getProfileForDirectOutput(
+                                                               audio_devices_t device,
+                                                               uint32_t samplingRate,
+                                                               audio_format_t format,
+                                                               audio_channel_mask_t channelMask,
+                                                               audio_output_flags_t flags)
+{
+    // Offloaded streams must match an offload-capable profile; anything else
+    // only needs to match a plain direct profile.
+    audio_output_flags_t requiredFlags = (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) ?
+            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT;
+
+    for (size_t moduleIdx = 0; moduleIdx < mHwModules.size(); moduleIdx++) {
+        // skip modules whose HAL has not been opened
+        if (mHwModules[moduleIdx]->mHandle == 0) {
+            continue;
+        }
+        for (size_t profIdx = 0; profIdx < mHwModules[moduleIdx]->mOutputProfiles.size();
+                profIdx++) {
+            IOProfile *profile = mHwModules[moduleIdx]->mOutputProfiles[profIdx];
+            if (!profile->isCompatibleProfile(device, samplingRate, format,
+                                              channelMask, requiredFlags)) {
+                continue;
+            }
+            // only accept a profile for which at least one supported device is
+            // currently available
+            if (mAvailableOutputDevices.types() & profile->mSupportedDevices.types()) {
+                return profile;
+            }
+        }
+    }
+    return 0;
+}
+
+// Returns an output handle suitable for a stream with the given attributes.
+// May reuse or open a direct/offload output (refcounted via mDirectOpenCount
+// and released in releaseOutput()), otherwise selects among the already-open
+// mixer outputs reaching the device chosen for the stream's routing strategy.
+// Returns 0 on failure.
+audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    audio_io_handle_t output = 0;
+    uint32_t latency = 0;
+    routing_strategy strategy = getStrategy(stream);
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
+          device, stream, samplingRate, format, channelMask, flags);
+
+#ifdef AUDIO_POLICY_TEST
+    // test harness path: serve (and lazily open) the dedicated test output
+    // instead of running the normal selection logic
+    if (mCurOutput != 0) {
+        ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d",
+                mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
+
+        if (mTestOutputs[mCurOutput] == 0) {
+            ALOGV("getOutput() opening test output");
+            AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(NULL);
+            outputDesc->mDevice = mTestDevice;
+            outputDesc->mSamplingRate = mTestSamplingRate;
+            outputDesc->mFormat = mTestFormat;
+            outputDesc->mChannelMask = mTestChannels;
+            outputDesc->mLatency = mTestLatencyMs;
+            outputDesc->mFlags =
+                    (audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0);
+            outputDesc->mRefCount[stream] = 0;
+            mTestOutputs[mCurOutput] = mpClientInterface->openOutput(0, &outputDesc->mDevice,
+                                            &outputDesc->mSamplingRate,
+                                            &outputDesc->mFormat,
+                                            &outputDesc->mChannelMask,
+                                            &outputDesc->mLatency,
+                                            outputDesc->mFlags,
+                                            offloadInfo);
+            if (mTestOutputs[mCurOutput]) {
+                AudioParameter outputCmd = AudioParameter();
+                outputCmd.addInt(String8("set_id"),mCurOutput);
+                mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
+                addOutput(mTestOutputs[mCurOutput], outputDesc);
+            }
+        }
+        return mTestOutputs[mCurOutput];
+    }
+#endif //AUDIO_POLICY_TEST
+
+    // open a direct output if required by specified parameters
+    //force direct flag if offload flag is set: offloading implies a direct output stream
+    // and all common behaviors are driven by checking only the direct flag
+    // this should normally be set appropriately in the policy configuration file
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
+
+    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
+    // creating an offloaded track and tearing it down immediately after start when audioflinger
+    // detects there is an active non offloadable effect.
+    // FIXME: We should check the audio session here but we do not have it in this context.
+    // This may prevent offloading in rare situations where effects are left active by apps
+    // in the background.
+    IOProfile *profile = NULL;
+    if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
+            !isNonOffloadableEffectEnabled()) {
+        profile = getProfileForDirectOutput(device,
+                                           samplingRate,
+                                           format,
+                                           channelMask,
+                                           (audio_output_flags_t)flags);
+    }
+
+    if (profile != NULL) {
+        AudioOutputDescriptor *outputDesc = NULL;
+
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            AudioOutputDescriptor *desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (profile == desc->mProfile)) {
+                outputDesc = desc;
+                // reuse direct output if currently open and configured with same parameters
+                if ((samplingRate == outputDesc->mSamplingRate) &&
+                        (format == outputDesc->mFormat) &&
+                        (channelMask == outputDesc->mChannelMask)) {
+                    outputDesc->mDirectOpenCount++;
+                    ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
+                    return mOutputs.keyAt(i);
+                }
+            }
+        }
+        // close direct output if currently open and configured with different parameters
+        if (outputDesc != NULL) {
+            closeOutput(outputDesc->mId);
+        }
+        // this descriptor is owned by mOutputs once addOutput() succeeds below,
+        // and deleted here on the failure path
+        outputDesc = new AudioOutputDescriptor(profile);
+        outputDesc->mDevice = device;
+        outputDesc->mSamplingRate = samplingRate;
+        outputDesc->mFormat = format;
+        outputDesc->mChannelMask = channelMask;
+        outputDesc->mLatency = 0;
+        outputDesc->mFlags =(audio_output_flags_t) (outputDesc->mFlags | flags);
+        outputDesc->mRefCount[stream] = 0;
+        outputDesc->mStopTime[stream] = 0;
+        outputDesc->mDirectOpenCount = 1;
+        output = mpClientInterface->openOutput(profile->mModule->mHandle,
+                                        &outputDesc->mDevice,
+                                        &outputDesc->mSamplingRate,
+                                        &outputDesc->mFormat,
+                                        &outputDesc->mChannelMask,
+                                        &outputDesc->mLatency,
+                                        outputDesc->mFlags,
+                                        offloadInfo);
+
+        // only accept an output with the requested parameters
+        if (output == 0 ||
+            (samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) ||
+            (format != AUDIO_FORMAT_DEFAULT && format != outputDesc->mFormat) ||
+            (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
+            ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
+                    "format %d %d, channelMask %04x %04x", output, samplingRate,
+                    outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
+                    outputDesc->mChannelMask);
+            if (output != 0) {
+                mpClientInterface->closeOutput(output);
+            }
+            delete outputDesc;
+            return 0;
+        }
+        // register the new output, then move global session effects to it if it
+        // has become the preferred output for effects
+        audio_io_handle_t srcOutput = getOutputForEffect();
+        addOutput(output, outputDesc);
+        audio_io_handle_t dstOutput = getOutputForEffect();
+        if (dstOutput == output) {
+            mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput);
+        }
+        mPreviousOutputs = mOutputs;
+        ALOGV("getOutput() returns new direct output %d", output);
+        return output;
+    }
+
+    // ignoring channel mask due to downmix capability in mixer
+
+    // open a non direct output
+
+    // for non direct outputs, only PCM is supported
+    if (audio_is_linear_pcm(format)) {
+        // get which output is suitable for the specified stream. The actual
+        // routing change will happen when startOutput() will be called
+        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+
+        output = selectOutput(outputs, flags);
+    }
+    ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
+            "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
+
+    ALOGV("getOutput() returns output %d", output);
+
+    return output;
+}
+
+audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
+                                                       audio_output_flags_t flags)
+{
+    // select one output among several that provide a path to a particular device or set of
+    // devices (the list was previously build by getOutputsForDevice()).
+    // The priority is as follows:
+    // 1: the output with the highest number of requested policy flags
+    // 2: the primary output
+    // 3: the first output in the list
+
+    // trivial cases: nothing to choose from, or a single candidate
+    if (outputs.size() == 0) {
+        return 0;
+    }
+    if (outputs.size() == 1) {
+        return outputs[0];
+    }
+
+    audio_io_handle_t bestFlagsOutput = 0;  // output sharing the most flags with the request
+    audio_io_handle_t primaryOutput = 0;    // output backed by the primary profile, if any
+    int bestFlagCount = 0;
+
+    for (size_t i = 0; i < outputs.size(); i++) {
+        AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]);
+        // duplicated outputs are never selected directly
+        if (desc->isDuplicated()) {
+            continue;
+        }
+        int sharedFlags = popcount(desc->mProfile->mFlags & flags);
+        if (sharedFlags > bestFlagCount) {
+            bestFlagsOutput = outputs[i];
+            bestFlagCount = sharedFlags;
+            ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], sharedFlags);
+        }
+        if (desc->mProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+            primaryOutput = outputs[i];
+        }
+    }
+
+    if (bestFlagsOutput != 0) {
+        return bestFlagsOutput;
+    }
+    if (primaryOutput != 0) {
+        return primaryOutput;
+    }
+    return outputs[0];
+}
+
+// Signals that playback of stream starts on output: increments the stream's
+// usage count and, on the first reference, applies the new device routing,
+// in-call sonification and volumes, then waits if needed so that audio active
+// on other outputs is presented before a notification starts.
+status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
+                                             audio_stream_type_t stream,
+                                             int session)
+{
+    ALOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
+    ssize_t index = mOutputs.indexOfKey(output);
+    if (index < 0) {
+        ALOGW("startOutput() unknown output %d", output);
+        return BAD_VALUE;
+    }
+
+    AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+
+    // increment usage count for this stream on the requested output:
+    // NOTE that the usage count is the same for duplicated output and hardware output which is
+    // necessary for a correct control of hardware output routing by startOutput() and stopOutput()
+    outputDesc->changeRefCount(stream, 1);
+
+    if (outputDesc->mRefCount[stream] == 1) {
+        audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/);
+        routing_strategy strategy = getStrategy(stream);
+        bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
+                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL);
+        uint32_t waitMs = 0;
+        bool force = false;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            AudioOutputDescriptor *desc = mOutputs.valueAt(i);
+            if (desc != outputDesc) {
+                // force a device change if any other output is managed by the same hw
+                // module and has a current device selection that differs from selected device.
+                // In this case, the audio HAL must receive the new device selection so that it can
+                // change the device currently selected by the other active output.
+                if (outputDesc->sharesHwModuleWith(desc) &&
+                    desc->device() != newDevice) {
+                    force = true;
+                }
+                // wait for audio on other active outputs to be presented when starting
+                // a notification so that audio focus effect can propagate.
+                uint32_t latency = desc->latency();
+                if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
+                    waitMs = latency;
+                }
+            }
+        }
+        uint32_t muteWaitMs = setOutputDevice(output, newDevice, force);
+
+        // handle special case for sonification while in call
+        if (isInCall()) {
+            handleIncallSonification(stream, true, false);
+        }
+
+        // apply volume rules for current stream and device if necessary
+        checkAndSetVolume(stream,
+                          mStreams[stream].getVolumeIndex(newDevice),
+                          output,
+                          newDevice);
+
+        // update the outputs if starting an output with a stream that can affect notification
+        // routing
+        handleNotificationRoutingForStream(stream);
+        if (waitMs > muteWaitMs) {
+            // NOTE(review): the residual wait is doubled here — presumably extra
+            // margin over the reported HAL latency; confirm the intended factor.
+            usleep((waitMs - muteWaitMs) * 2 * 1000);
+        }
+    }
+    return NO_ERROR;
+}
+
+
+// Decrements the usage count of stream on output; when the last reference is
+// released, records the stop time (used by isStreamActive()) and re-evaluates
+// device routing on this output and on other active outputs sharing the same
+// HW module.
+status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
+                                            audio_stream_type_t stream,
+                                            int session)
+{
+    ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
+    ssize_t index = mOutputs.indexOfKey(output);
+    if (index < 0) {
+        ALOGW("stopOutput() unknown output %d", output);
+        return BAD_VALUE;
+    }
+
+    AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+
+    // handle special case for sonification while in call
+    if (isInCall()) {
+        handleIncallSonification(stream, false, false);
+    }
+
+    if (outputDesc->mRefCount[stream] > 0) {
+        // decrement usage count of this stream on the output
+        outputDesc->changeRefCount(stream, -1);
+        // store time at which the stream was stopped - see isStreamActive()
+        if (outputDesc->mRefCount[stream] == 0) {
+            outputDesc->mStopTime[stream] = systemTime();
+            audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/);
+            // delay the device switch by twice the latency because stopOutput() is executed when
+            // the track stop() command is received and at that time the audio track buffer can
+            // still contain data that needs to be drained. The latency only covers the audio HAL
+            // and kernel buffers. Also the latency does not always include additional delay in the
+            // audio path (audio DSP, CODEC ...)
+            setOutputDevice(output, newDevice, false, outputDesc->mLatency*2);
+
+            // force restoring the device selection on other active outputs if it differs from the
+            // one being selected for this output
+            for (size_t i = 0; i < mOutputs.size(); i++) {
+                audio_io_handle_t curOutput = mOutputs.keyAt(i);
+                AudioOutputDescriptor *desc = mOutputs.valueAt(i);
+                if (curOutput != output &&
+                        desc->isActive() &&
+                        outputDesc->sharesHwModuleWith(desc) &&
+                        (newDevice != desc->device())) {
+                    setOutputDevice(curOutput,
+                                    getNewDevice(curOutput, false /*fromCache*/),
+                                    true,
+                                    outputDesc->mLatency*2);
+                }
+            }
+            // update the outputs if stopping one with a stream that can affect notification routing
+            handleNotificationRoutingForStream(stream);
+        }
+        return NO_ERROR;
+    } else {
+        ALOGW("stopOutput() refcount is already 0 for output %d", output);
+        return INVALID_OPERATION;
+    }
+}
+
+// Releases a reference on an output obtained with getOutput(). For direct
+// outputs, decrements mDirectOpenCount and closes the output when the count
+// reaches zero, moving global effects back to the appropriate output.
+void AudioPolicyManager::releaseOutput(audio_io_handle_t output)
+{
+    ALOGV("releaseOutput() %d", output);
+    ssize_t index = mOutputs.indexOfKey(output);
+    if (index < 0) {
+        ALOGW("releaseOutput() releasing unknown output %d", output);
+        return;
+    }
+
+#ifdef AUDIO_POLICY_TEST
+    // test harness outputs are closed directly here when still active
+    int testIndex = testOutputIndex(output);
+    if (testIndex != 0) {
+        AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+        if (outputDesc->isActive()) {
+            mpClientInterface->closeOutput(output);
+            delete mOutputs.valueAt(index);
+            mOutputs.removeItem(output);
+            mTestOutputs[testIndex] = 0;
+        }
+        return;
+    }
+#endif //AUDIO_POLICY_TEST
+
+    AudioOutputDescriptor *desc = mOutputs.valueAt(index);
+    if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+        if (desc->mDirectOpenCount <= 0) {
+            ALOGW("releaseOutput() invalid open count %d for output %d",
+                                                              desc->mDirectOpenCount, output);
+            return;
+        }
+        if (--desc->mDirectOpenCount == 0) {
+            closeOutput(output);
+            // If effects where present on the output, audioflinger moved them to the primary
+            // output by default: move them back to the appropriate output.
+            audio_io_handle_t dstOutput = getOutputForEffect();
+            if (dstOutput != mPrimaryOutput) {
+                mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mPrimaryOutput, dstOutput);
+            }
+        }
+    }
+}
+
+
+// Select an input device for "inputSource", look up a matching input profile
+// and open an input stream on its HW module.
+// @param inputSource  capture use case (mic, voice call, hotword, ...)
+// @param samplingRate/format/channelMask  requested stream parameters; the
+//        input is rejected unless the HAL honors them exactly
+// @param acoustics    pre-processing hints (only logged here)
+// @return the new input handle, or 0 on any failure (no device, no profile,
+//         HW module not opened, or parameters not honored by the HAL)
+audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_in_acoustics_t acoustics)
+{
+    audio_io_handle_t input = 0;
+    audio_devices_t device = getDeviceForInputSource(inputSource);
+
+    ALOGV("getInput() inputSource %d, samplingRate %d, format %d, channelMask %x, acoustics %x",
+          inputSource, samplingRate, format, channelMask, acoustics);
+
+    if (device == AUDIO_DEVICE_NONE) {
+        ALOGW("getInput() could not find device for inputSource %d", inputSource);
+        return 0;
+    }
+
+    // adapt channel selection to input source: voice uplink/downlink captures
+    // use fixed channel masks regardless of what the client requested
+    switch(inputSource) {
+    case AUDIO_SOURCE_VOICE_UPLINK:
+        channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
+        break;
+    case AUDIO_SOURCE_VOICE_DOWNLINK:
+        channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK;
+        break;
+    case AUDIO_SOURCE_VOICE_CALL:
+        channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
+        break;
+    default:
+        break;
+    }
+
+    IOProfile *profile = getInputProfile(device,
+                                         samplingRate,
+                                         format,
+                                         channelMask);
+    if (profile == NULL) {
+        ALOGW("getInput() could not find profile for device %04x, samplingRate %d, format %d, "
+                "channelMask %04x",
+                device, samplingRate, format, channelMask);
+        return 0;
+    }
+
+    if (profile->mModule->mHandle == 0) {
+        ALOGE("getInput(): HW module %s not opened", profile->mModule->mName);
+        return 0;
+    }
+
+    AudioInputDescriptor *inputDesc = new AudioInputDescriptor(profile);
+
+    inputDesc->mInputSource = inputSource;
+    inputDesc->mDevice = device;
+    inputDesc->mSamplingRate = samplingRate;
+    inputDesc->mFormat = format;
+    inputDesc->mChannelMask = channelMask;
+    inputDesc->mRefCount = 0;    // not started yet, see startInput()
+    // openInput() may rewrite the parameters to what the HAL actually supports
+    input = mpClientInterface->openInput(profile->mModule->mHandle,
+                                    &inputDesc->mDevice,
+                                    &inputDesc->mSamplingRate,
+                                    &inputDesc->mFormat,
+                                    &inputDesc->mChannelMask);
+
+    // only accept input with the exact requested set of parameters
+    if (input == 0 ||
+        (samplingRate != inputDesc->mSamplingRate) ||
+        (format != inputDesc->mFormat) ||
+        (channelMask != inputDesc->mChannelMask)) {
+        ALOGI("getInput() failed opening input: samplingRate %d, format %d, channelMask %x",
+                samplingRate, format, channelMask);
+        if (input != 0) {
+            mpClientInterface->closeInput(input);
+        }
+        delete inputDesc;
+        return 0;
+    }
+    mInputs.add(input, inputDesc);    // descriptor ownership moves to mInputs
+    return input;
+}
+
+// Make "input" the active capture stream: enforce the single-active-client
+// policy, re-evaluate the capture device, push routing and input-source
+// parameters to the stream, and mark it active.
+// @return NO_ERROR on success, BAD_VALUE for an unknown handle,
+//         INVALID_OPERATION if another regular input is already active.
+status_t AudioPolicyManager::startInput(audio_io_handle_t input)
+{
+    ALOGV("startInput() input %d", input);
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        ALOGW("startInput() unknown input %d", input);
+        return BAD_VALUE;
+    }
+    AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
+
+#ifdef AUDIO_POLICY_TEST
+    if (mTestInput == 0)
+#endif //AUDIO_POLICY_TEST
+    {
+        // refuse 2 active AudioRecord clients at the same time except if the active input
+        // uses AUDIO_SOURCE_HOTWORD in which case it is closed.
+        // Virtual input devices (e.g. remote submix) are exempt from this policy.
+        audio_io_handle_t activeInput = getActiveInput();
+        if (!isVirtualInputDevice(inputDesc->mDevice) && activeInput != 0) {
+            AudioInputDescriptor *activeDesc = mInputs.valueFor(activeInput);
+            if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
+                ALOGW("startInput() preempting already started low-priority input %d", activeInput);
+                stopInput(activeInput);
+                releaseInput(activeInput);
+            } else {
+                ALOGW("startInput() input %d failed: other input already started", input);
+                return INVALID_OPERATION;
+            }
+        }
+    }
+
+    // re-evaluate the capture device in case conditions changed since getInput()
+    audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource);
+    if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) {
+        inputDesc->mDevice = newDevice;
+    }
+
+    // automatically enable the remote submix output when input is started
+    if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+        setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                AUDIO_POLICY_DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
+    }
+
+    AudioParameter param = AudioParameter();
+    param.addInt(String8(AudioParameter::keyRouting), (int)inputDesc->mDevice);
+
+    // HOTWORD is an internal alias: the HAL is told VOICE_RECOGNITION instead
+    int aliasSource = (inputDesc->mInputSource == AUDIO_SOURCE_HOTWORD) ?
+                                        AUDIO_SOURCE_VOICE_RECOGNITION : inputDesc->mInputSource;
+
+    param.addInt(String8(AudioParameter::keyInputSource), aliasSource);
+    ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
+
+    mpClientInterface->setParameters(input, param.toString());
+
+    inputDesc->mRefCount = 1;    // single active client model: refcount is 0 or 1
+    return NO_ERROR;
+}
+
+// Deactivate an input previously started with startInput(): tear down the
+// remote submix path if it was auto-enabled, clear the stream's routing and
+// reset the refcount. The input itself stays open until releaseInput().
+status_t AudioPolicyManager::stopInput(audio_io_handle_t input)
+{
+    ALOGV("stopInput() input %d", input);
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        ALOGW("stopInput() unknown input %d", input);
+        return BAD_VALUE;
+    }
+    AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
+
+    if (inputDesc->mRefCount == 0) {
+        ALOGW("stopInput() input %d already stopped", input);
+        return INVALID_OPERATION;
+    } else {
+        // automatically disable the remote submix output when input is stopped
+        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+            setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                    AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
+        }
+
+        // routing value 0 detaches the input stream from its device
+        AudioParameter param = AudioParameter();
+        param.addInt(String8(AudioParameter::keyRouting), 0);
+        mpClientInterface->setParameters(input, param.toString());
+        inputDesc->mRefCount = 0;
+        return NO_ERROR;
+    }
+}
+
+// Close an input previously obtained with getInput() and forget its descriptor.
+void AudioPolicyManager::releaseInput(audio_io_handle_t input)
+{
+    ALOGV("releaseInput() %d", input);
+    const ssize_t idx = mInputs.indexOfKey(input);
+    if (idx < 0) {
+        ALOGW("releaseInput() releasing unknown input %d", input);
+        return;
+    }
+    // Close the stream at the HAL level, then drop our bookkeeping entry.
+    mpClientInterface->closeInput(input);
+    AudioInputDescriptor *descriptor = mInputs.valueAt(idx);
+    mInputs.removeItem(input);
+    delete descriptor;
+    ALOGV("releaseInput() exit");
+}
+
+// Record the valid volume index range for a stream type. An invalid range is
+// rejected and logged, leaving any previously stored limits untouched.
+void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
+                                            int indexMin,
+                                            int indexMax)
+{
+    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    const bool rangeIsValid = (indexMin >= 0) && (indexMin < indexMax);
+    if (!rangeIsValid) {
+        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d", stream , indexMin, indexMax);
+        return;
+    }
+    mStreams[stream].mIndexMax = indexMax;
+    mStreams[stream].mIndexMin = indexMin;
+}
+
+// Apply a new volume index for "stream" on "device".
+// AUDIO_DEVICE_OUT_DEFAULT clears all per-device entries and becomes the
+// fallback value; otherwise the index is stored for that device only. The
+// volume is then re-applied on every output currently routed to a matching
+// device.
+// @return BAD_VALUE for an out-of-range index or a non-output device,
+//         otherwise the last error reported by checkAndSetVolume(), if any.
+status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
+                                                      int index,
+                                                      audio_devices_t device)
+{
+
+    if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) {
+        return BAD_VALUE;
+    }
+    if (!audio_is_output_device(device)) {
+        return BAD_VALUE;
+    }
+
+    // Force max volume if stream cannot be muted
+    if (!mStreams[stream].mCanBeMuted) index = mStreams[stream].mIndexMax;
+
+    ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d",
+          stream, device, index);
+
+    // if device is AUDIO_DEVICE_OUT_DEFAULT set default value and
+    // clear all device specific values
+    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
+        mStreams[stream].mIndexCur.clear();
+    }
+    mStreams[stream].mIndexCur.add(device, index);
+
+    // compute and apply stream volume on all outputs according to connected device
+    status_t status = NO_ERROR;
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        audio_devices_t curDevice =
+                getDeviceForVolume(mOutputs.valueAt(i)->device());
+        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || (device == curDevice)) {
+            status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice);
+            if (volStatus != NO_ERROR) {
+                // remember the failure but keep applying to the remaining outputs
+                status = volStatus;
+            }
+        }
+    }
+    return status;
+}
+
+// Read back the current volume index of "stream" for "device".
+// AUDIO_DEVICE_OUT_DEFAULT is resolved to the device currently selected by
+// the stream's routing strategy before the lookup.
+// @return BAD_VALUE for a null index pointer or a non-output device.
+status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
+                                                      int *index,
+                                                      audio_devices_t device)
+{
+    if (index == NULL) {
+        return BAD_VALUE;
+    }
+    if (!audio_is_output_device(device)) {
+        return BAD_VALUE;
+    }
+    // if device is AUDIO_DEVICE_OUT_DEFAULT, return volume for device corresponding to
+    // the strategy the stream belongs to.
+    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
+        device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+    }
+    // collapse device aliases that share a single volume setting
+    device = getDeviceForVolume(device);
+
+    *index =  mStreams[stream].getVolumeIndex(device);
+    ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
+    return NO_ERROR;
+}
+
+// Pick one output among "outputs" to host global (output-mix) effects.
+// @param outputs  candidate outputs, all suitable for the effects' routing
+// @return the chosen handle, or 0 when the list is empty
+audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
+                                            const SortedVector<audio_io_handle_t>& outputs)
+{
+    // select one output among several suitable for global effects.
+    // The priority is as follows:
+    // 1: An offloaded output. If the effect ends up not being offloadable,
+    //    AudioFlinger will invalidate the track and the offloaded output
+    //    will be closed causing the effect to be moved to a PCM output.
+    // 2: A deep buffer output
+    // 3: the first output in the list
+
+    if (outputs.size() == 0) {
+        return 0;
+    }
+
+    audio_io_handle_t outputOffloaded = 0;
+    audio_io_handle_t outputDeepBuffer = 0;
+
+    for (size_t i = 0; i < outputs.size(); i++) {
+        AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]);
+        // %zu matches the size_t loop index; %d expects an int and is
+        // undefined behavior on LP64 targets where size_t is 64-bit
+        ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags);
+        if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+            outputOffloaded = outputs[i];
+        }
+        if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
+            outputDeepBuffer = outputs[i];
+        }
+    }
+
+    ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d",
+          outputOffloaded, outputDeepBuffer);
+    // preference order: offloaded, then deep buffer, then first candidate
+    if (outputOffloaded != 0) {
+        return outputOffloaded;
+    }
+    if (outputDeepBuffer != 0) {
+        return outputDeepBuffer;
+    }
+
+    return outputs[0];
+}
+
+// Choose the output that global (output-mix) effects should be attached to:
+// effects follow the outputs suitable for the MUSIC stream's routing.
+// @param desc  optional effect descriptor, used for logging only here
+audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)
+{
+    // apply simple rule where global effects are attached to the same output as MUSIC streams
+
+    routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(device, mOutputs);
+
+    audio_io_handle_t output = selectOutputForEffects(dstOutputs);
+    ALOGV("getOutputForEffect() got output %d for fx %s flags %x",
+          output, (desc == NULL) ? "unspecified" : desc->name,  (desc == NULL) ? 0 : desc->flags);
+
+    return output;
+}
+
+// Register an effect instance so its memory (and, once enabled, CPU) usage
+// can be accounted against the global effect limits.
+// @param desc      effect descriptor (copied; caller keeps ownership)
+// @param io        output or input handle the effect is attached to
+// @param strategy  routing strategy of the session hosting the effect
+// @param session   audio session the effect is attached to
+// @param id        unique ID used later by unregisterEffect()/setEffectEnabled()
+// @return INVALID_OPERATION for an unknown io handle or when the total
+//         effects memory limit would be exceeded.
+status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
+                                audio_io_handle_t io,
+                                uint32_t strategy,
+                                int session,
+                                int id)
+{
+    // io may designate either an output or an input
+    ssize_t index = mOutputs.indexOfKey(io);
+    if (index < 0) {
+        index = mInputs.indexOfKey(io);
+        if (index < 0) {
+            ALOGW("registerEffect() unknown io %d", io);
+            return INVALID_OPERATION;
+        }
+    }
+
+    if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
+        ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
+                desc->name, desc->memoryUsage);
+        return INVALID_OPERATION;
+    }
+    mTotalEffectsMemory += desc->memoryUsage;
+    ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
+            desc->name, io, strategy, session, id);
+    ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
+
+    EffectDescriptor *pDesc = new EffectDescriptor();
+    memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t));
+    pDesc->mIo = io;
+    pDesc->mStrategy = (routing_strategy)strategy;
+    pDesc->mSession = session;
+    pDesc->mEnabled = false;    // CPU load is only accounted once enabled
+
+    mEffects.add(id, pDesc);    // mEffects owns pDesc until unregisterEffect()
+
+    return NO_ERROR;
+}
+
+// Remove an effect registered with registerEffect(), releasing its memory
+// quota (and its CPU quota if it was still enabled).
+status_t AudioPolicyManager::unregisterEffect(int id)
+{
+    ssize_t index = mEffects.indexOfKey(id);
+    if (index < 0) {
+        ALOGW("unregisterEffect() unknown effect ID %d", id);
+        return INVALID_OPERATION;
+    }
+
+    EffectDescriptor *pDesc = mEffects.valueAt(index);
+
+    // release the CPU load quota first in case the effect is still enabled
+    setEffectEnabled(pDesc, false);
+
+    // clamp to avoid underflowing the global counter on inconsistent input
+    if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) {
+        ALOGW("unregisterEffect() memory %d too big for total %d",
+                pDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+        pDesc->mDesc.memoryUsage = mTotalEffectsMemory;
+    }
+    mTotalEffectsMemory -= pDesc->mDesc.memoryUsage;
+    ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d",
+            pDesc->mDesc.name, id, pDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+
+    mEffects.removeItem(id);
+    delete pDesc;
+
+    return NO_ERROR;
+}
+
+// Enable or disable a registered effect, updating the global effects CPU
+// load accounting.
+// @param id       effect ID assigned at registerEffect() time
+// @param enabled  new enable state
+// @return INVALID_OPERATION for an unknown id, otherwise the result of
+//         setEffectEnabled(EffectDescriptor *, bool)
+status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled)
+{
+    ssize_t index = mEffects.indexOfKey(id);
+    if (index < 0) {
+        // fixed log tag: it previously said "unregisterEffect()" (copy/paste error)
+        ALOGW("setEffectEnabled() unknown effect ID %d", id);
+        return INVALID_OPERATION;
+    }
+
+    return setEffectEnabled(mEffects.valueAt(index), enabled);
+}
+
+// Update the global effects CPU load accounting when an effect's enable
+// state changes. Enabling fails when the total CPU load limit would be
+// exceeded; disabling clamps the load to avoid counter underflow.
+status_t AudioPolicyManager::setEffectEnabled(EffectDescriptor *pDesc, bool enabled)
+{
+    if (enabled == pDesc->mEnabled) {
+        ALOGV("setEffectEnabled(%s) effect already %s",
+             enabled?"true":"false", enabled?"enabled":"disabled");
+        return INVALID_OPERATION;
+    }
+
+    if (enabled) {
+        if (mTotalEffectsCpuLoad + pDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) {
+            // cpuLoad is divided by 10 for display, i.e. stored in 0.1 MIPS units
+            ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS",
+                 pDesc->mDesc.name, (float)pDesc->mDesc.cpuLoad/10);
+            return INVALID_OPERATION;
+        }
+        mTotalEffectsCpuLoad += pDesc->mDesc.cpuLoad;
+        ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad);
+    } else {
+        // clamp to keep the global counter consistent on bogus descriptors
+        if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) {
+            ALOGW("setEffectEnabled(false) CPU load %d too high for total %d",
+                    pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
+            pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
+        }
+        mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad;
+        ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad);
+    }
+    pDesc->mEnabled = enabled;
+    return NO_ERROR;
+}
+
+// Returns true when at least one enabled media-strategy effect does not
+// advertise EFFECT_FLAG_OFFLOAD_SUPPORTED, i.e. offloading must be avoided.
+bool AudioPolicyManager::isNonOffloadableEffectEnabled()
+{
+    for (size_t i = 0; i < mEffects.size(); i++) {
+        const EffectDescriptor * const fx = mEffects.valueAt(i);
+        // skip disabled effects and effects outside the media strategy
+        if (!fx->mEnabled || (fx->mStrategy != STRATEGY_MEDIA)) {
+            continue;
+        }
+        // an offload-capable effect is not a blocker
+        if ((fx->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) != 0) {
+            continue;
+        }
+        ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
+              fx->mDesc.name, fx->mSession);
+        return true;
+    }
+    return false;
+}
+
+// True if "stream" has been active on any output within the last inPastMs ms.
+bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    const nsecs_t now = systemTime();
+    for (size_t idx = 0; idx < mOutputs.size(); idx++) {
+        if (mOutputs.valueAt(idx)->isStreamActive(stream, inPastMs, now)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Like isStreamActive(), but only counts outputs currently routed to a
+// remote device (see APM_AUDIO_OUT_DEVICE_REMOTE_ALL).
+bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream,
+                                                    uint32_t inPastMs) const
+{
+    const nsecs_t now = systemTime();
+    for (size_t idx = 0; idx < mOutputs.size(); idx++) {
+        const AudioOutputDescriptor *desc = mOutputs.valueAt(idx);
+        const bool routedRemotely =
+                (desc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0;
+        if (routedRemotely && desc->isStreamActive(stream, inPastMs, now)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// True if an input with a non-zero refcount is capturing from "source".
+// AUDIO_SOURCE_HOTWORD is treated as an alias of AUDIO_SOURCE_VOICE_RECOGNITION.
+bool AudioPolicyManager::isSourceActive(audio_source_t source) const
+{
+    for (size_t idx = 0; idx < mInputs.size(); idx++) {
+        const AudioInputDescriptor *desc = mInputs.valueAt(idx);
+        if (desc->mRefCount == 0) {
+            continue;   // input opened but not started
+        }
+        bool sourceMatches = (desc->mInputSource == (int)source);
+        if (!sourceMatches && (source == AUDIO_SOURCE_VOICE_RECOGNITION)) {
+            sourceMatches = (desc->mInputSource == AUDIO_SOURCE_HOTWORD);
+        }
+        if (sourceMatches) {
+            return true;
+        }
+    }
+    return false;
+}
+
+
+// Write a human-readable snapshot of the policy manager state (forced usage,
+// devices, HW modules, outputs, inputs, stream volumes, effects) to "fd".
+status_t AudioPolicyManager::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
+    result.append(buffer);
+
+    snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for communications %d\n",
+             mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for media %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA]);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for record %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD]);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for dock %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK]);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]);
+    result.append(buffer);
+
+    snprintf(buffer, SIZE, " Available output devices:\n");
+    result.append(buffer);
+    // header section accumulated in "result" is flushed here; the remaining
+    // sections write straight to fd
+    write(fd, result.string(), result.size());
+    DeviceDescriptor::dumpHeader(fd, 2);
+    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
+        mAvailableOutputDevices[i]->dump(fd, 2);
+    }
+    snprintf(buffer, SIZE, "\n Available input devices:\n");
+    write(fd, buffer, strlen(buffer));
+    DeviceDescriptor::dumpHeader(fd, 2);
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        mAvailableInputDevices[i]->dump(fd, 2);
+    }
+
+    snprintf(buffer, SIZE, "\nHW Modules dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < mHwModules.size(); i++) {
+        // FIXME: "i + 1" is a size_t but %d expects int (use %zu or cast)
+        snprintf(buffer, SIZE, "- HW Module %d:\n", i + 1);
+        write(fd, buffer, strlen(buffer));
+        mHwModules[i]->dump(fd);
+    }
+
+    snprintf(buffer, SIZE, "\nOutputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        snprintf(buffer, SIZE, "- Output %d dump:\n", mOutputs.keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        mOutputs.valueAt(i)->dump(fd);
+    }
+
+    snprintf(buffer, SIZE, "\nInputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        snprintf(buffer, SIZE, "- Input %d dump:\n", mInputs.keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        mInputs.valueAt(i)->dump(fd);
+    }
+
+    snprintf(buffer, SIZE, "\nStreams dump:\n");
+    write(fd, buffer, strlen(buffer));
+    snprintf(buffer, SIZE,
+             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
+    write(fd, buffer, strlen(buffer));
+    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+        snprintf(buffer, SIZE, " %02d      ", i);
+        write(fd, buffer, strlen(buffer));
+        mStreams[i].dump(fd);
+    }
+
+    snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
+            (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
+    write(fd, buffer, strlen(buffer));
+
+    snprintf(buffer, SIZE, "Registered effects:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < mEffects.size(); i++) {
+        snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        mEffects.valueAt(i)->dump(fd);
+    }
+
+
+    return NO_ERROR;
+}
+
+// This function checks for the parameters which can be offloaded.
+// This can be enhanced depending on the capability of the DSP and policy
+// of the system.
+// @param offloadInfo  stream parameters supplied by the client
+// @return true only when the stream is offload-eligible (music, no video,
+//         long enough, no non-offloadable effect active) AND a profile with
+//         AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD matches the stream parameters.
+bool AudioPolicyManager::isOffloadSupported(const audio_offload_info_t& offloadInfo)
+{
+    ALOGV("isOffloadSupported: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d,"
+     " BitRate=%u, duration=%lld us, has_video=%d",
+     offloadInfo.sample_rate, offloadInfo.channel_mask,
+     offloadInfo.format,
+     offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
+     offloadInfo.has_video);
+
+    // Check if offload has been disabled
+    char propValue[PROPERTY_VALUE_MAX];
+    if (property_get("audio.offload.disable", propValue, "0")) {
+        if (atoi(propValue) != 0) {
+            ALOGV("offload disabled by audio.offload.disable=%s", propValue );
+            return false;
+        }
+    }
+
+    // Check if stream type is music, then only allow offload as of now.
+    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
+    {
+        ALOGV("isOffloadSupported: stream_type != MUSIC, returning false");
+        return false;
+    }
+
+    //TODO: enable audio offloading with video when ready
+    if (offloadInfo.has_video)
+    {
+        ALOGV("isOffloadSupported: has_video == true, returning false");
+        return false;
+    }
+
+    //If duration is less than minimum value defined in property, return false
+    // NOTE(review): atoi()*1000000 is computed as int and overflows for
+    // property values above ~2147 seconds — confirm this is acceptable.
+    if (property_get("audio.offload.min.duration.secs", propValue, NULL)) {
+        if (offloadInfo.duration_us < (atoi(propValue) * 1000000 )) {
+            ALOGV("Offload denied by duration < audio.offload.min.duration.secs(=%s)", propValue);
+            return false;
+        }
+    } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
+        ALOGV("Offload denied by duration < default min(=%u)", OFFLOAD_DEFAULT_MIN_DURATION_SECS);
+        return false;
+    }
+
+    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
+    // creating an offloaded track and tearing it down immediately after start when audioflinger
+    // detects there is an active non offloadable effect.
+    // FIXME: We should check the audio session here but we do not have it in this context.
+    // This may prevent offloading in rare situations where effects are left active by apps
+    // in the background.
+    if (isNonOffloadableEffectEnabled()) {
+        return false;
+    }
+
+    // See if there is a profile to support this.
+    // AUDIO_DEVICE_NONE
+    IOProfile *profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */,
+                                            offloadInfo.sample_rate,
+                                            offloadInfo.format,
+                                            offloadInfo.channel_mask,
+                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+    ALOGV("isOffloadSupported() profile %sfound", profile != NULL ? "" : "NOT ");
+    return (profile != NULL);
+}
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManager
+// ----------------------------------------------------------------------------
+
+// Returns a process-unique ID used to tag device descriptors.
+// NOTE(review): android_atomic_inc() returns the *previous* value; with
+// mNextUniqueId initialized to 0 (see the constructor) the first call
+// returns 0, which is also the "unassigned" sentinel tested via
+// (mId == 0) elsewhere in this file — confirm the first ID is meant to
+// be non-zero.
+uint32_t AudioPolicyManager::nextUniqueId()
+{
+    return android_atomic_inc(&mNextUniqueId);
+}
+
+AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
+    :
+#ifdef AUDIO_POLICY_TEST
+    Thread(false),
+#endif //AUDIO_POLICY_TEST
+    mPrimaryOutput((audio_io_handle_t)0),
+    mPhoneState(AUDIO_MODE_NORMAL),
+    mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
+    mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
+    mA2dpSuspended(false),
+    mSpeakerDrcEnabled(false), mNextUniqueId(0)
+{
+    mpClientInterface = clientInterface;
+
+    for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
+        mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
+    }
+
+    mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
+    if (loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE) != NO_ERROR) {
+        if (loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE) != NO_ERROR) {
+            ALOGE("could not load audio policy configuration file, setting defaults");
+            defaultAudioPolicyConfig();
+        }
+    }
+    // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
+
+    // must be done after reading the policy
+    initializeVolumeCurves();
+
+    // open all output streams needed to access attached devices
+    audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
+    audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+    for (size_t i = 0; i < mHwModules.size(); i++) {
+        mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->mName);
+        if (mHwModules[i]->mHandle == 0) {
+            ALOGW("could not open HW module %s", mHwModules[i]->mName);
+            continue;
+        }
+        // open all output streams needed to access attached devices
+        // except for direct output streams that are only opened when they are actually
+        // required by an app.
+        // This also validates mAvailableOutputDevices list
+        for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
+        {
+            const IOProfile *outProfile = mHwModules[i]->mOutputProfiles[j];
+
+            if (outProfile->mSupportedDevices.isEmpty()) {
+                ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName);
+                continue;
+            }
+
+            audio_devices_t profileTypes = outProfile->mSupportedDevices.types();
+            if ((profileTypes & outputDeviceTypes) &&
+                    ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) {
+                AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(outProfile);
+
+                outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice->mType & profileTypes);
+                audio_io_handle_t output = mpClientInterface->openOutput(
+                                                outProfile->mModule->mHandle,
+                                                &outputDesc->mDevice,
+                                                &outputDesc->mSamplingRate,
+                                                &outputDesc->mFormat,
+                                                &outputDesc->mChannelMask,
+                                                &outputDesc->mLatency,
+                                                outputDesc->mFlags);
+                if (output == 0) {
+                    ALOGW("Cannot open output stream for device %08x on hw module %s",
+                          outputDesc->mDevice,
+                          mHwModules[i]->mName);
+                    delete outputDesc;
+                } else {
+                    for (size_t i = 0; i  < outProfile->mSupportedDevices.size(); i++) {
+                        audio_devices_t type = outProfile->mSupportedDevices[i]->mType;
+                        ssize_t index =
+                                mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[i]);
+                        // give a valid ID to an attached device once confirmed it is reachable
+                        if ((index >= 0) && (mAvailableOutputDevices[index]->mId == 0)) {
+                            mAvailableOutputDevices[index]->mId = nextUniqueId();
+                        }
+                    }
+                    if (mPrimaryOutput == 0 &&
+                            outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+                        mPrimaryOutput = output;
+                    }
+                    addOutput(output, outputDesc);
+                    setOutputDevice(output,
+                                    outputDesc->mDevice,
+                                    true);
+                }
+            }
+        }
+        // open input streams needed to access attached devices to validate
+        // mAvailableInputDevices list
+        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
+        {
+            const IOProfile *inProfile = mHwModules[i]->mInputProfiles[j];
+
+            if (inProfile->mSupportedDevices.isEmpty()) {
+                ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName);
+                continue;
+            }
+
+            audio_devices_t profileTypes = inProfile->mSupportedDevices.types();
+            if (profileTypes & inputDeviceTypes) {
+                AudioInputDescriptor *inputDesc = new AudioInputDescriptor(inProfile);
+
+                inputDesc->mInputSource = AUDIO_SOURCE_MIC;
+                inputDesc->mDevice = inProfile->mSupportedDevices[0]->mType;
+                audio_io_handle_t input = mpClientInterface->openInput(
+                                                    inProfile->mModule->mHandle,
+                                                    &inputDesc->mDevice,
+                                                    &inputDesc->mSamplingRate,
+                                                    &inputDesc->mFormat,
+                                                    &inputDesc->mChannelMask);
+
+                if (input != 0) {
+                    for (size_t i = 0; i  < inProfile->mSupportedDevices.size(); i++) {
+                        audio_devices_t type = inProfile->mSupportedDevices[i]->mType;
+                        ssize_t index =
+                                mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[i]);
+                        // give a valid ID to an attached device once confirmed it is reachable
+                        if ((index >= 0) && (mAvailableInputDevices[index]->mId == 0)) {
+                            mAvailableInputDevices[index]->mId = nextUniqueId();
+                        }
+                    }
+                    mpClientInterface->closeInput(input);
+                } else {
+                    ALOGW("Cannot open input stream for device %08x on hw module %s",
+                          inputDesc->mDevice,
+                          mHwModules[i]->mName);
+                }
+                delete inputDesc;
+            }
+        }
+    }
+    // make sure all attached devices have been allocated a unique ID
+    for (size_t i = 0; i  < mAvailableOutputDevices.size();) {
+        if (mAvailableOutputDevices[i]->mId == 0) {
+            ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->mType);
+            mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
+            continue;
+        }
+        i++;
+    }
+    for (size_t i = 0; i  < mAvailableInputDevices.size();) {
+        if (mAvailableInputDevices[i]->mId == 0) {
+            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->mType);
+            mAvailableInputDevices.remove(mAvailableInputDevices[i]);
+            continue;
+        }
+        i++;
+    }
+    // make sure default device is reachable
+    if (mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
+        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->mType);
+    }
+
+    ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output");
+
+    updateDevicesAndOutputs();
+
+#ifdef AUDIO_POLICY_TEST
+    if (mPrimaryOutput != 0) {
+        AudioParameter outputCmd = AudioParameter();
+        outputCmd.addInt(String8("set_id"), 0);
+        mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString());
+
+        mTestDevice = AUDIO_DEVICE_OUT_SPEAKER;
+        mTestSamplingRate = 44100;
+        mTestFormat = AUDIO_FORMAT_PCM_16_BIT;
+        mTestChannels =  AUDIO_CHANNEL_OUT_STEREO;
+        mTestLatencyMs = 0;
+        mCurOutput = 0;
+        mDirectOutput = false;
+        for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+            mTestOutputs[i] = 0;
+        }
+
+        const size_t SIZE = 256;
+        char buffer[SIZE];
+        snprintf(buffer, SIZE, "AudioPolicyManagerTest");
+        run(buffer, ANDROID_PRIORITY_AUDIO);
+    }
+#endif //AUDIO_POLICY_TEST
+}
+
+// Releases everything owned by the policy manager: closes and deletes all
+// output and input streams, deletes the HW module descriptors and clears the
+// attached-device lists. Streams are closed on the client (AudioFlinger) side
+// before their descriptors are deleted.
+AudioPolicyManager::~AudioPolicyManager()
+{
+#ifdef AUDIO_POLICY_TEST
+    exit();   // stop the test command thread before tearing state down
+#endif //AUDIO_POLICY_TEST
+   for (size_t i = 0; i < mOutputs.size(); i++) {
+        mpClientInterface->closeOutput(mOutputs.keyAt(i));
+        delete mOutputs.valueAt(i);
+   }
+   for (size_t i = 0; i < mInputs.size(); i++) {
+        mpClientInterface->closeInput(mInputs.keyAt(i));
+        delete mInputs.valueAt(i);
+   }
+   for (size_t i = 0; i < mHwModules.size(); i++) {
+        delete mHwModules[i];
+   }
+   mAvailableOutputDevices.clear();
+   mAvailableInputDevices.clear();
+}
+
+// Reports whether construction succeeded: the manager is only usable once a
+// primary output stream has been opened by the constructor.
+status_t AudioPolicyManager::initCheck()
+{
+    if (mPrimaryOutput != 0) {
+        return NO_ERROR;
+    }
+    return NO_INIT;
+}
+
+#ifdef AUDIO_POLICY_TEST
+// Test thread body (AUDIO_POLICY_TEST builds only): polls the client interface
+// every 50 ms for "test_cmd_policy" key/value commands and applies them —
+// select the current test output, toggle direct mode, force format/channels/
+// sample rate either on the manager or on the selected output, or reopen the
+// primary output. Returns false when an exit has been requested.
+bool AudioPolicyManager::threadLoop()
+{
+    ALOGV("entering threadLoop()");
+    while (!exitPending())
+    {
+        String8 command;
+        int valueInt;
+        String8 value;
+
+        Mutex::Autolock _l(mLock);
+        mWaitWorkCV.waitRelative(mLock, milliseconds(50));
+
+        command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
+        AudioParameter param = AudioParameter(command);
+
+        if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR &&
+            valueInt != 0) {
+            ALOGV("Test command %s received", command.string());
+            String8 target;
+            // commands apply to the manager itself unless a target is given
+            if (param.get(String8("target"), target) != NO_ERROR) {
+                target = "Manager";
+            }
+            if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_output"));
+                mCurOutput = valueInt;
+            }
+            if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_direct"));
+                if (value == "false") {
+                    mDirectOutput = false;
+                } else if (value == "true") {
+                    mDirectOutput = true;
+                }
+            }
+            if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_input"));
+                mTestInput = valueInt;
+            }
+
+            if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_format"));
+                int format = AUDIO_FORMAT_INVALID;
+                if (value == "PCM 16 bits") {
+                    format = AUDIO_FORMAT_PCM_16_BIT;
+                } else if (value == "PCM 8 bits") {
+                    format = AUDIO_FORMAT_PCM_8_BIT;
+                } else if (value == "Compressed MP3") {
+                    format = AUDIO_FORMAT_MP3;
+                }
+                if (format != AUDIO_FORMAT_INVALID) {
+                    if (target == "Manager") {
+                        mTestFormat = format;
+                    } else if (mTestOutputs[mCurOutput] != 0) {
+                        AudioParameter outputParam = AudioParameter();
+                        outputParam.addInt(String8("format"), format);
+                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+                    }
+                }
+            }
+            if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_channels"));
+                int channels = 0;
+
+                if (value == "Channels Stereo") {
+                    channels =  AUDIO_CHANNEL_OUT_STEREO;
+                } else if (value == "Channels Mono") {
+                    channels =  AUDIO_CHANNEL_OUT_MONO;
+                }
+                if (channels != 0) {
+                    if (target == "Manager") {
+                        mTestChannels = channels;
+                    } else if (mTestOutputs[mCurOutput] != 0) {
+                        AudioParameter outputParam = AudioParameter();
+                        outputParam.addInt(String8("channels"), channels);
+                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+                    }
+                }
+            }
+            if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_sampleRate"));
+                if (valueInt >= 0 && valueInt <= 96000) {
+                    int samplingRate = valueInt;
+                    if (target == "Manager") {
+                        mTestSamplingRate = samplingRate;
+                    } else if (mTestOutputs[mCurOutput] != 0) {
+                        AudioParameter outputParam = AudioParameter();
+                        outputParam.addInt(String8("sampling_rate"), samplingRate);
+                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+                    }
+                }
+            }
+
+            if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_reopen"));
+
+                AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mPrimaryOutput);
+                mpClientInterface->closeOutput(mPrimaryOutput);
+
+                // grab the module handle before the descriptor is deleted below
+                audio_module_handle_t moduleHandle = outputDesc->mModule->mHandle;
+
+                delete mOutputs.valueFor(mPrimaryOutput);
+                mOutputs.removeItem(mPrimaryOutput);
+
+                // NOTE: distinct name here — the previous code redeclared
+                // "outputDesc" in the same scope, which does not compile.
+                AudioOutputDescriptor *newOutputDesc = new AudioOutputDescriptor(NULL);
+                newOutputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER;
+                mPrimaryOutput = mpClientInterface->openOutput(moduleHandle,
+                                                &newOutputDesc->mDevice,
+                                                &newOutputDesc->mSamplingRate,
+                                                &newOutputDesc->mFormat,
+                                                &newOutputDesc->mChannelMask,
+                                                &newOutputDesc->mLatency,
+                                                newOutputDesc->mFlags);
+                if (mPrimaryOutput == 0) {
+                    ALOGE("Failed to reopen hardware output stream, samplingRate: %d, format %d, channels %d",
+                            newOutputDesc->mSamplingRate, newOutputDesc->mFormat, newOutputDesc->mChannelMask);
+                    delete newOutputDesc;   // do not leak the descriptor on failure
+                } else {
+                    AudioParameter outputCmd = AudioParameter();
+                    outputCmd.addInt(String8("set_id"), 0);
+                    mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString());
+                    addOutput(mPrimaryOutput, newOutputDesc);
+                }
+            }
+
+
+            // acknowledge the command so the sender does not see it again
+            mpClientInterface->setParameters(0, String8("test_cmd_policy="));
+        }
+    }
+    return false;
+}
+
+// Asks the test thread to terminate and blocks until it has exited. The exit
+// request and the condition-variable signal are performed under mLock so that
+// threadLoop(), which waits on mWaitWorkCV with the same lock held, observes
+// the request promptly; the join happens after the lock is released.
+void AudioPolicyManager::exit()
+{
+    {
+        AutoMutex _l(mLock);
+        requestExit();
+        mWaitWorkCV.signal();
+    }
+    requestExitAndWait();
+}
+
+// Maps an output handle back to its slot in mTestOutputs.
+// Falls back to slot 0 when the handle is not a registered test output.
+int AudioPolicyManager::testOutputIndex(audio_io_handle_t output)
+{
+    int slot = 0;
+    while (slot < NUM_TEST_OUTPUTS && mTestOutputs[slot] != output) {
+        slot++;
+    }
+    return (slot < NUM_TEST_OUTPUTS) ? slot : 0;
+}
+#endif //AUDIO_POLICY_TEST
+
+// ---
+
+// Registers an opened output stream: stamps the descriptor with its I/O handle
+// and stores it in mOutputs keyed by that handle.
+void AudioPolicyManager::addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc)
+{
+    outputDesc->mId = id;
+    mOutputs.add(id, outputDesc);
+}
+
+
+// Formats a device address for delivery via setParameters(). A2DP sinks expect
+// the address wrapped in an "a2dp_sink_address" key/value pair; every other
+// device type takes the raw address string.
+String8 AudioPolicyManager::addressToParameter(audio_devices_t device, const String8 address)
+{
+    if ((device & AUDIO_DEVICE_OUT_ALL_A2DP) == 0) {
+        return address;
+    }
+    String8 param("a2dp_sink_address=");
+    param += address;
+    return param;
+}
+
+// Updates the set of outputs usable for |device| after a connection state
+// change.
+// - State AVAILABLE: collects already-open outputs that can reach the device,
+//   then opens one output per profile supporting it. Direct outputs are also
+//   opened (and possibly closed again) to query their dynamic sample-rate/
+//   format/channel-mask parameters; mixed outputs additionally get a
+//   duplicating thread to the primary output.
+// - Otherwise (disconnect): collects outputs that can no longer reach any
+//   available device, and resets the dynamic parameters of direct output
+//   profiles that served the device.
+// |outputs| receives the affected I/O handles; |address| is an optional
+// device address forwarded to the stream.
+// Returns BAD_VALUE if no output can address the device, NO_ERROR otherwise.
+status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device,
+                                                       audio_policy_dev_state_t state,
+                                                       SortedVector<audio_io_handle_t>& outputs,
+                                                       const String8 address)
+{
+    AudioOutputDescriptor *desc;
+
+    if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
+        // first list already open outputs that can be routed to this device
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) {
+                ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
+                outputs.add(mOutputs.keyAt(i));
+            }
+        }
+        // then look for output profiles that can be routed to this device
+        SortedVector<IOProfile *> profiles;
+        for (size_t i = 0; i < mHwModules.size(); i++)
+        {
+            if (mHwModules[i]->mHandle == 0) {
+                continue;
+            }
+            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
+            {
+                if (mHwModules[i]->mOutputProfiles[j]->mSupportedDevices.types() & device) {
+                    ALOGV("checkOutputsForDevice(): adding profile %d from module %d", j, i);
+                    profiles.add(mHwModules[i]->mOutputProfiles[j]);
+                }
+            }
+        }
+
+        if (profiles.isEmpty() && outputs.isEmpty()) {
+            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
+            return BAD_VALUE;
+        }
+
+        // open outputs for matching profiles if needed. Direct outputs are also opened to
+        // query for dynamic parameters and will be closed later by setDeviceConnectionState()
+        for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
+            IOProfile *profile = profiles[profile_index];
+
+            // nothing to do if one output is already opened for this profile
+            size_t j;
+            for (j = 0; j < mOutputs.size(); j++) {
+                desc = mOutputs.valueAt(j);
+                if (!desc->isDuplicated() && desc->mProfile == profile) {
+                    break;
+                }
+            }
+            if (j != mOutputs.size()) {
+                continue;
+            }
+
+            ALOGV("opening output for device %08x with params %s", device, address.string());
+            desc = new AudioOutputDescriptor(profile);
+            desc->mDevice = device;
+            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+            offloadInfo.sample_rate = desc->mSamplingRate;
+            offloadInfo.format = desc->mFormat;
+            offloadInfo.channel_mask = desc->mChannelMask;
+
+            audio_io_handle_t output = mpClientInterface->openOutput(profile->mModule->mHandle,
+                                                                       &desc->mDevice,
+                                                                       &desc->mSamplingRate,
+                                                                       &desc->mFormat,
+                                                                       &desc->mChannelMask,
+                                                                       &desc->mLatency,
+                                                                       desc->mFlags,
+                                                                       &offloadInfo);
+            if (output != 0) {
+                if (!address.isEmpty()) {
+                    mpClientInterface->setParameters(output, addressToParameter(device, address));
+                }
+
+                if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+                    // a "0" (or AUDIO_FORMAT_DEFAULT) placeholder in the profile
+                    // means the parameter list is dynamic: query it from the stream
+                    String8 reply;
+                    char *value;
+                    if (profile->mSamplingRates[0] == 0) {
+                        reply = mpClientInterface->getParameters(output,
+                                                String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
+                        ALOGV("checkOutputsForDevice() direct output sup sampling rates %s",
+                                  reply.string());
+                        value = strpbrk((char *)reply.string(), "=");
+                        if (value != NULL) {
+                            loadSamplingRates(value + 1, profile);
+                        }
+                    }
+                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
+                        reply = mpClientInterface->getParameters(output,
+                                                       String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
+                        ALOGV("checkOutputsForDevice() direct output sup formats %s",
+                                  reply.string());
+                        value = strpbrk((char *)reply.string(), "=");
+                        if (value != NULL) {
+                            loadFormats(value + 1, profile);
+                        }
+                    }
+                    if (profile->mChannelMasks[0] == 0) {
+                        reply = mpClientInterface->getParameters(output,
+                                                      String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
+                        ALOGV("checkOutputsForDevice() direct output sup channel masks %s",
+                                  reply.string());
+                        value = strpbrk((char *)reply.string(), "=");
+                        if (value != NULL) {
+                            loadOutChannels(value + 1, profile);
+                        }
+                    }
+                    // close the output if any dynamic parameter list is still empty
+                    // after the query. FIX: the third clause now checks
+                    // mChannelMasks[0] (it previously re-tested mFormats[0], a
+                    // copy-paste error that skipped channel mask validation).
+                    if (((profile->mSamplingRates[0] == 0) &&
+                             (profile->mSamplingRates.size() < 2)) ||
+                         ((profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) &&
+                             (profile->mFormats.size() < 2)) ||
+                         ((profile->mChannelMasks[0] == 0) &&
+                             (profile->mChannelMasks.size() < 2))) {
+                        ALOGW("checkOutputsForDevice() direct output missing param");
+                        mpClientInterface->closeOutput(output);
+                        output = 0;
+                    } else {
+                        addOutput(output, desc);
+                    }
+                } else {
+                    audio_io_handle_t duplicatedOutput = 0;
+                    // add output descriptor
+                    addOutput(output, desc);
+                    // set initial stream volume for device
+                    applyStreamVolumes(output, device, 0, true);
+
+                    //TODO: configure audio effect output stage here
+
+                    // open a duplicating output thread for the new output and the primary output
+                    duplicatedOutput = mpClientInterface->openDuplicateOutput(output,
+                                                                              mPrimaryOutput);
+                    if (duplicatedOutput != 0) {
+                        // add duplicated output descriptor
+                        AudioOutputDescriptor *dupOutputDesc = new AudioOutputDescriptor(NULL);
+                        dupOutputDesc->mOutput1 = mOutputs.valueFor(mPrimaryOutput);
+                        dupOutputDesc->mOutput2 = mOutputs.valueFor(output);
+                        dupOutputDesc->mSamplingRate = desc->mSamplingRate;
+                        dupOutputDesc->mFormat = desc->mFormat;
+                        dupOutputDesc->mChannelMask = desc->mChannelMask;
+                        dupOutputDesc->mLatency = desc->mLatency;
+                        addOutput(duplicatedOutput, dupOutputDesc);
+                        applyStreamVolumes(duplicatedOutput, device, 0, true);
+                    } else {
+                        ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
+                                mPrimaryOutput, output);
+                        mpClientInterface->closeOutput(output);
+                        mOutputs.removeItem(output);
+                        output = 0;
+                    }
+                }
+            }
+            if (output == 0) {
+                ALOGW("checkOutputsForDevice() could not open output for device %x", device);
+                delete desc;
+                profiles.removeAt(profile_index);
+                profile_index--;
+            } else {
+                outputs.add(output);
+                ALOGV("checkOutputsForDevice(): adding output %d", output);
+            }
+        }
+
+        if (profiles.isEmpty()) {
+            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
+            return BAD_VALUE;
+        }
+    } else {
+        // check if one opened output is not needed any more after disconnecting one device
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() &&
+                    !(desc->mProfile->mSupportedDevices.types() &
+                            mAvailableOutputDevices.types())) {
+                ALOGV("checkOutputsForDevice(): disconnecting adding output %d", mOutputs.keyAt(i));
+                outputs.add(mOutputs.keyAt(i));
+            }
+        }
+        for (size_t i = 0; i < mHwModules.size(); i++)
+        {
+            if (mHwModules[i]->mHandle == 0) {
+                continue;
+            }
+            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
+            {
+                IOProfile *profile = mHwModules[i]->mOutputProfiles[j];
+                if ((profile->mSupportedDevices.types() & device) &&
+                        (profile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
+                    ALOGV("checkOutputsForDevice(): clearing direct output profile %d on module %d",
+                          j, i);
+                    // reset each dynamic list back to its single placeholder entry
+                    if (profile->mSamplingRates[0] == 0) {
+                        profile->mSamplingRates.clear();
+                        profile->mSamplingRates.add(0);
+                    }
+                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
+                        profile->mFormats.clear();
+                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
+                    }
+                    if (profile->mChannelMasks[0] == 0) {
+                        profile->mChannelMasks.clear();
+                        profile->mChannelMasks.add(0);
+                    }
+                }
+            }
+        }
+    }
+    return NO_ERROR;
+}
+
+// Closes and unregisters an output stream. Any duplicating output built on top
+// of it is closed first, after transferring the duplicated tracks' stream
+// reference counts back to the surviving member output so its activity
+// accounting stays correct. mPreviousOutputs is refreshed so later routing
+// checks compare against the post-close topology.
+void AudioPolicyManager::closeOutput(audio_io_handle_t output)
+{
+    ALOGV("closeOutput(%d)", output);
+
+    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+    if (outputDesc == NULL) {
+        ALOGW("closeOutput() unknown output %d", output);
+        return;
+    }
+
+    // look for duplicated outputs connected to the output being removed.
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        AudioOutputDescriptor *dupOutputDesc = mOutputs.valueAt(i);
+        if (dupOutputDesc->isDuplicated() &&
+                (dupOutputDesc->mOutput1 == outputDesc ||
+                dupOutputDesc->mOutput2 == outputDesc)) {
+            // outputDesc2 is the member of the pair that stays open
+            AudioOutputDescriptor *outputDesc2;
+            if (dupOutputDesc->mOutput1 == outputDesc) {
+                outputDesc2 = dupOutputDesc->mOutput2;
+            } else {
+                outputDesc2 = dupOutputDesc->mOutput1;
+            }
+            // As all active tracks on duplicated output will be deleted,
+            // and as they were also referenced on the other output, the reference
+            // count for their stream type must be adjusted accordingly on
+            // the other output.
+            for (int j = 0; j < AUDIO_STREAM_CNT; j++) {
+                int refCount = dupOutputDesc->mRefCount[j];
+                outputDesc2->changeRefCount((audio_stream_type_t)j,-refCount);
+            }
+            audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
+            ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
+
+            mpClientInterface->closeOutput(duplicatedOutput);
+            delete mOutputs.valueFor(duplicatedOutput);
+            mOutputs.removeItem(duplicatedOutput);
+        }
+    }
+
+    // let the stream know it is going away before actually closing it
+    AudioParameter param;
+    param.add(String8("closing"), String8("true"));
+    mpClientInterface->setParameters(output, param.toString());
+
+    mpClientInterface->closeOutput(output);
+    delete outputDesc;
+    mOutputs.removeItem(output);
+    mPreviousOutputs = mOutputs;
+}
+
+// Returns the (sorted) set of handles from |openOutputs| whose supported
+// devices cover every bit of |device|.
+SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device,
+                        DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> openOutputs)
+{
+    SortedVector<audio_io_handle_t> outputs;
+
+    ALOGVV("getOutputsForDevice() device %04x", device);
+    for (size_t i = 0; i < openOutputs.size(); i++) {
+        AudioOutputDescriptor *candidate = openOutputs.valueAt(i);
+        ALOGVV("output %d isDuplicated=%d device=%04x",
+                i, candidate->isDuplicated(), candidate->supportedDevices());
+        // an output qualifies only if it can reach all requested device bits
+        if ((device & candidate->supportedDevices()) == device) {
+            ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i));
+            outputs.add(openOutputs.keyAt(i));
+        }
+    }
+    return outputs;
+}
+
+// Element-wise equality of two sorted handle vectors (same length and same
+// handle at every position).
+bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
+                                   SortedVector<audio_io_handle_t>& outputs2)
+{
+    const size_t count = outputs1.size();
+    if (count != outputs2.size()) {
+        return false;
+    }
+    size_t i = 0;
+    while (i < count && outputs1[i] == outputs2[i]) {
+        i++;
+    }
+    return i == count;
+}
+
+// Re-evaluates where |strategy| should be routed and, if the set of outputs
+// changed, migrates it: mutes the strategy on the old outputs (with a delayed
+// unmute covering the transition), moves output-mix effects to the new
+// effect output for media, then invalidates the affected stream types so
+// AudioTrack clients re-create their tracks on the new outputs.
+void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
+{
+    audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/);
+    audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/);
+    // compare against mPreviousOutputs: mOutputs may already reflect the change
+    SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mPreviousOutputs);
+    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(newDevice, mOutputs);
+
+    if (!vectorsEqual(srcOutputs,dstOutputs)) {
+        ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
+              strategy, srcOutputs[0], dstOutputs[0]);
+        // mute strategy while moving tracks from one output to another
+        for (size_t i = 0; i < srcOutputs.size(); i++) {
+            AudioOutputDescriptor *desc = mOutputs.valueFor(srcOutputs[i]);
+            if (desc->isStrategyActive(strategy)) {
+                // mute now, schedule unmute after MUTE_TIME_MS on the new device
+                setStrategyMute(strategy, true, srcOutputs[i]);
+                setStrategyMute(strategy, false, srcOutputs[i], MUTE_TIME_MS, newDevice);
+            }
+        }
+
+        // Move effects associated to this strategy from previous output to new output
+        if (strategy == STRATEGY_MEDIA) {
+            audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs);
+            SortedVector<audio_io_handle_t> moved;
+            for (size_t i = 0; i < mEffects.size(); i++) {
+                EffectDescriptor *desc = mEffects.valueAt(i);
+                if (desc->mSession == AUDIO_SESSION_OUTPUT_MIX &&
+                        desc->mIo != fxOutput) {
+                    // move each source output at most once, even if it hosts
+                    // several effects
+                    if (moved.indexOf(desc->mIo) < 0) {
+                        ALOGV("checkOutputForStrategy() moving effect %d to output %d",
+                              mEffects.keyAt(i), fxOutput);
+                        mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, desc->mIo,
+                                                       fxOutput);
+                        moved.add(desc->mIo);
+                    }
+                    desc->mIo = fxOutput;
+                }
+            }
+        }
+        // Move tracks associated to this strategy from previous output to new output
+        for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+            if (getStrategy((audio_stream_type_t)i) == strategy) {
+                mpClientInterface->invalidateStream((audio_stream_type_t)i);
+            }
+        }
+    }
+}
+
+// Re-evaluates routing for every strategy, from highest to lowest priority.
+void AudioPolicyManager::checkOutputForAllStrategies()
+{
+    static const routing_strategy kStrategyOrder[] = {
+        STRATEGY_ENFORCED_AUDIBLE,
+        STRATEGY_PHONE,
+        STRATEGY_SONIFICATION,
+        STRATEGY_SONIFICATION_RESPECTFUL,
+        STRATEGY_MEDIA,
+        STRATEGY_DTMF,
+    };
+    for (size_t i = 0; i < sizeof(kStrategyOrder) / sizeof(kStrategyOrder[0]); i++) {
+        checkOutputForStrategy(kStrategyOrder[i]);
+    }
+}
+
+// Returns the handle of the first non-duplicated open output currently routed
+// to an A2DP device, or 0 if none is.
+audio_io_handle_t AudioPolicyManager::getA2dpOutput()
+{
+    audio_io_handle_t a2dpOutput = 0;
+    for (size_t i = 0; i < mOutputs.size() && a2dpOutput == 0; i++) {
+        AudioOutputDescriptor *desc = mOutputs.valueAt(i);
+        if (!desc->isDuplicated() && (desc->device() & AUDIO_DEVICE_OUT_ALL_A2DP)) {
+            a2dpOutput = mOutputs.keyAt(i);
+        }
+    }
+    return a2dpOutput;
+}
+
+// Suspends or restores the A2DP output depending on SCO usage and call state,
+// so A2DP and BT-SCO audio never compete for the Bluetooth link. No-op (and
+// suspend state cleared) when no A2DP output is open.
+void AudioPolicyManager::checkA2dpSuspend()
+{
+    audio_io_handle_t a2dpOutput = getA2dpOutput();
+    if (a2dpOutput == 0) {
+        mA2dpSuspended = false;
+        return;
+    }
+
+    bool isScoConnected =
+            (mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) != 0;
+    // suspend A2DP output if:
+    //      (NOT already suspended) &&
+    //      ((SCO device is connected &&
+    //       (forced usage for communication || for record is SCO))) ||
+    //      (phone state is ringing || in call)
+    //
+    // restore A2DP output if:
+    //      (Already suspended) &&
+    //      ((SCO device is NOT connected ||
+    //       (forced usage NOT for communication && NOT for record is SCO))) &&
+    //      (phone state is NOT ringing && NOT in call)
+    //
+    if (mA2dpSuspended) {
+        if ((!isScoConnected ||
+             ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] != AUDIO_POLICY_FORCE_BT_SCO))) &&
+             ((mPhoneState != AUDIO_MODE_IN_CALL) &&
+              (mPhoneState != AUDIO_MODE_RINGTONE))) {
+
+            mpClientInterface->restoreOutput(a2dpOutput);
+            mA2dpSuspended = false;
+        }
+    } else {
+        if ((isScoConnected &&
+             ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO))) ||
+             ((mPhoneState == AUDIO_MODE_IN_CALL) ||
+              (mPhoneState == AUDIO_MODE_RINGTONE))) {
+
+            mpClientInterface->suspendOutput(a2dpOutput);
+            mA2dpSuspended = true;
+        }
+    }
+}
+
+// Selects the device a routing change on |output| should target. Strategies
+// are examined from highest to lowest priority; the first one active on this
+// output decides. STRATEGY_PHONE also wins whenever a call is in progress,
+// even if not active on this particular output. Returns AUDIO_DEVICE_NONE
+// when no strategy is active.
+audio_devices_t AudioPolicyManager::getNewDevice(audio_io_handle_t output, bool fromCache)
+{
+    static const routing_strategy kPriorityOrder[] = {
+        STRATEGY_ENFORCED_AUDIBLE,
+        STRATEGY_PHONE,
+        STRATEGY_SONIFICATION,
+        STRATEGY_SONIFICATION_RESPECTFUL,
+        STRATEGY_MEDIA,
+        STRATEGY_DTMF,
+    };
+
+    audio_devices_t device = AUDIO_DEVICE_NONE;
+    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+
+    for (size_t i = 0; i < sizeof(kPriorityOrder) / sizeof(kPriorityOrder[0]); i++) {
+        routing_strategy strategy = kPriorityOrder[i];
+        bool applies;
+        if (strategy == STRATEGY_PHONE) {
+            // in-call state forces the phone strategy regardless of activity
+            applies = isInCall() || outputDesc->isStrategyActive(strategy);
+        } else {
+            applies = outputDesc->isStrategyActive(strategy);
+        }
+        if (applies) {
+            device = getDeviceForStrategy(strategy, fromCache);
+            break;
+        }
+    }
+
+    ALOGV("getNewDevice() selected device %x", device);
+    return device;
+}
+
+// Exposes the internal routing_strategy of a stream type to callers as a
+// plain integer.
+uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
+    routing_strategy strategy = getStrategy(stream);
+    return static_cast<uint32_t>(strategy);
+}
+
+// Returns the cached device set for a stream type's strategy.
+// The range check comes first on purpose: getStrategy() would log an error
+// and fall back to STRATEGY_MEDIA for an invalid stream, whereas callers of
+// this entry point expect the empty set.
+audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
+    if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
+    AudioPolicyManager::routing_strategy strategy = getStrategy(stream);
+    return getDeviceForStrategy(strategy, true /*fromCache*/);
+}
+
+AudioPolicyManager::routing_strategy AudioPolicyManager::getStrategy(
+        audio_stream_type_t stream) {
+    // Map a stream type to the routing strategy that selects its output device.
+    // NOTE: the "default" label is deliberately placed in the middle of the
+    // switch: an unknown stream type logs an error and then falls through to
+    // the STRATEGY_MEDIA group below.
+    // stream to strategy mapping
+    switch (stream) {
+    case AUDIO_STREAM_VOICE_CALL:
+    case AUDIO_STREAM_BLUETOOTH_SCO:
+        return STRATEGY_PHONE;
+    case AUDIO_STREAM_RING:
+    case AUDIO_STREAM_ALARM:
+        return STRATEGY_SONIFICATION;
+    case AUDIO_STREAM_NOTIFICATION:
+        return STRATEGY_SONIFICATION_RESPECTFUL;
+    case AUDIO_STREAM_DTMF:
+        return STRATEGY_DTMF;
+    default:
+        ALOGE("unknown stream type");
+        // FALL THROUGH: treat unknown streams as media
+    case AUDIO_STREAM_SYSTEM:
+        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
+        // while key clicks are played produces a poor result
+    case AUDIO_STREAM_TTS:
+    case AUDIO_STREAM_MUSIC:
+        return STRATEGY_MEDIA;
+    case AUDIO_STREAM_ENFORCED_AUDIBLE:
+        return STRATEGY_ENFORCED_AUDIBLE;
+    }
+}
+
+void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
+    // The "respectful" sonification routing depends on music activity (see
+    // getDeviceForStrategy()), so re-evaluate it whenever the music stream
+    // starts or stops. All other streams need no rerouting here.
+    if (stream == AUDIO_STREAM_MUSIC) {
+        checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
+        updateDevicesAndOutputs();
+    }
+}
+
+audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
+                                                             bool fromCache)
+{
+    // Compute the output device selection for a routing strategy.
+    // When fromCache is true, return the value precomputed by
+    // updateDevicesAndOutputs() instead of re-evaluating the policy rules.
+    // Several cases below deliberately FALL THROUGH to share device selection
+    // logic (DTMF->PHONE, SONIFICATION->ENFORCED_AUDIBLE->MEDIA).
+    uint32_t device = AUDIO_DEVICE_NONE;
+
+    if (fromCache) {
+        ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
+              strategy, mDeviceForStrategy[strategy]);
+        return mDeviceForStrategy[strategy];
+    }
+    audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
+    switch (strategy) {
+
+    case STRATEGY_SONIFICATION_RESPECTFUL:
+        if (isInCall()) {
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+        } else if (isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
+                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+            // while media is playing on a remote device, use the sonification behavior.
+            // Note that we test this usecase before testing if media is playing because
+            //   the isStreamActive() method only informs about the activity of a stream, not
+            //   if it's for local playback. Note also that we use the same delay between both tests
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+        } else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+            // while media is playing (or has recently played), use the same device
+            device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+        } else {
+            // when media is not playing anymore, fall back on the sonification behavior
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+        }
+
+        break;
+
+    case STRATEGY_DTMF:
+        if (!isInCall()) {
+            // when off call, DTMF strategy follows the same rules as MEDIA strategy
+            device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+            break;
+        }
+        // when in call, DTMF and PHONE strategies follow the same rules
+        // FALL THROUGH
+
+    case STRATEGY_PHONE:
+        // for phone strategy, we first consider the forced use and then the available devices by order
+        // of priority
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            // carkit SCO is not used for DTMF during a call; each "if (device) break;"
+            // below stops at the first available device in priority order
+            if (!isInCall() || strategy != STRATEGY_DTMF) {
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+                if (device) break;
+            }
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+            if (device) break;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
+            if (device) break;
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
+            if (!isInCall() &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (getA2dpOutput() != 0) && !mA2dpSuspended) {
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+                if (device) break;
+            }
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+            if (device) break;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+            if (device) break;
+            // USB/dock/HDMI devices are only eligible when not in a call
+            if (mPhoneState != AUDIO_MODE_IN_CALL) {
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                if (device) break;
+            }
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_EARPIECE;
+            if (device) break;
+            device = mDefaultOutputDevice->mType;
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
+            // A2DP speaker when forcing to speaker output
+            if (!isInCall() &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (getA2dpOutput() != 0) && !mA2dpSuspended) {
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+                if (device) break;
+            }
+            if (mPhoneState != AUDIO_MODE_IN_CALL) {
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                if (device) break;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                if (device) break;
+            }
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+            if (device) break;
+            device = mDefaultOutputDevice->mType;
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
+            }
+            break;
+        }
+    break;
+
+    case STRATEGY_SONIFICATION:
+
+        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
+        // handleIncallSonification().
+        if (isInCall()) {
+            device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/);
+            break;
+        }
+        // FALL THROUGH
+
+    case STRATEGY_ENFORCED_AUDIBLE:
+        // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
+        // except:
+        //   - when in call where it doesn't default to STRATEGY_PHONE behavior
+        //   - in countries where not enforced in which case it follows STRATEGY_MEDIA
+
+        if ((strategy == STRATEGY_SONIFICATION) ||
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
+            }
+        }
+        // The second device used for sonification is the same as the device used by media strategy
+        // FALL THROUGH
+
+    case STRATEGY_MEDIA: {
+        // device2 is ORed into device at the end: for SONIFICATION/ENFORCED_AUDIBLE
+        // the speaker selected above is kept in addition to the media device.
+        uint32_t device2 = AUDIO_DEVICE_NONE;
+        if (strategy != STRATEGY_SONIFICATION) {
+            // no sonification on remote submix (e.g. WFD)
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                (getA2dpOutput() != 0) && !mA2dpSuspended) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+            if (device2 == AUDIO_DEVICE_NONE) {
+                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+            }
+            if (device2 == AUDIO_DEVICE_NONE) {
+                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+            }
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
+            // no sonification on aux digital (e.g. HDMI)
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
+        }
+
+        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
+        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
+        device |= device2;
+        if (device) break;
+        device = mDefaultOutputDevice->mType;
+        if (device == AUDIO_DEVICE_NONE) {
+            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
+        }
+        } break;
+
+    default:
+        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
+        break;
+    }
+
+    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
+    return device;
+}
+
+void AudioPolicyManager::updateDevicesAndOutputs()
+{
+    // Refresh the per-strategy device cache consulted by
+    // getDeviceForStrategy(..., true /*fromCache*/), then snapshot the current
+    // output set for later comparison.
+    for (int strategy = 0; strategy < NUM_STRATEGIES; strategy++) {
+        mDeviceForStrategy[strategy] =
+                getDeviceForStrategy((routing_strategy)strategy, false /*fromCache*/);
+    }
+    mPreviousOutputs = mOutputs;
+}
+
+uint32_t AudioPolicyManager::checkDeviceMuteStrategies(AudioOutputDescriptor *outputDesc,
+                                                       audio_devices_t prevDevice,
+                                                       uint32_t delayMs)
+{
+    // Mute/unmute strategies around a device change on outputDesc and return
+    // the number of milliseconds the caller must account for before routing.
+    // mute/unmute strategies using an incompatible device combination
+    // if muting, wait for the audio in pcm buffer to be drained before proceeding
+    // if unmuting, unmute only after the specified delay
+    if (outputDesc->isDuplicated()) {
+        // duplicated outputs are handled through their two underlying outputs
+        return 0;
+    }
+
+    uint32_t muteWaitMs = 0;
+    audio_devices_t device = outputDesc->device();
+    // mute only when the active output routes to a combination of devices
+    bool shouldMute = outputDesc->isActive() && (popcount(device) >= 2);
+    // temporary mute output if device selection changes to avoid volume bursts due to
+    // different per device volumes
+    bool tempMute = outputDesc->isActive() && (device != prevDevice);
+
+    for (size_t i = 0; i < NUM_STRATEGIES; i++) {
+        audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+        // mute a strategy only if its device partially overlaps the new combination
+        bool mute = shouldMute && (curDevice & device) && (curDevice != device);
+        bool doMute = false;
+
+        // track mute state per strategy so we only toggle on transitions
+        if (mute && !outputDesc->mStrategyMutedByDevice[i]) {
+            doMute = true;
+            outputDesc->mStrategyMutedByDevice[i] = true;
+        } else if (!mute && outputDesc->mStrategyMutedByDevice[i]){
+            doMute = true;
+            outputDesc->mStrategyMutedByDevice[i] = false;
+        }
+        if (doMute || tempMute) {
+            for (size_t j = 0; j < mOutputs.size(); j++) {
+                AudioOutputDescriptor *desc = mOutputs.valueAt(j);
+                // skip output if it does not share any device with current output
+                if ((desc->supportedDevices() & outputDesc->supportedDevices())
+                        == AUDIO_DEVICE_NONE) {
+                    continue;
+                }
+                audio_io_handle_t curOutput = mOutputs.keyAt(j);
+                ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x) on output %d",
+                      mute ? "muting" : "unmuting", i, curDevice, curOutput);
+                // mute immediately, unmute only after delayMs
+                setStrategyMute((routing_strategy)i, mute, curOutput, mute ? 0 : delayMs);
+                if (desc->isStrategyActive((routing_strategy)i)) {
+                    // do tempMute only for current output
+                    if (tempMute && (desc == outputDesc)) {
+                        setStrategyMute((routing_strategy)i, true, curOutput);
+                        // schedule the unmute after twice the output latency
+                        setStrategyMute((routing_strategy)i, false, curOutput,
+                                            desc->latency() * 2, device);
+                    }
+                    if ((tempMute && (desc == outputDesc)) || mute) {
+                        // the wait time is bounded by the largest affected output latency
+                        if (muteWaitMs < desc->latency()) {
+                            muteWaitMs = desc->latency();
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // FIXME: should not need to double latency if volume could be applied immediately by the
+    // audioflinger mixer. We must account for the delay between now and the next time
+    // the audioflinger thread for this output will process a buffer (which corresponds to
+    // one buffer size, usually 1/2 or 1/4 of the latency).
+    muteWaitMs *= 2;
+    // wait for the PCM output buffers to empty before proceeding with the rest of the command
+    // NOTE(review): this blocks the calling thread with usleep() — presumably
+    // acceptable on the policy command thread; confirm before reusing elsewhere.
+    if (muteWaitMs > delayMs) {
+        muteWaitMs -= delayMs;
+        usleep(muteWaitMs * 1000);
+        return muteWaitMs;
+    }
+    return 0;
+}
+
+uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output,
+                                             audio_devices_t device,
+                                             bool force,
+                                             int delayMs)
+{
+    // Route "output" to "device": mute affected strategies, send the routing
+    // parameter to audioflinger, and re-apply stream volumes for the new
+    // device. Returns the mute wait time (ms) the caller must honor.
+    ALOGV("setOutputDevice() output %d device %04x delayMs %d", output, device, delayMs);
+    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+    AudioParameter param;
+    uint32_t muteWaitMs;
+
+    if (outputDesc->isDuplicated()) {
+        // route both underlying outputs and accumulate their mute wait times
+        muteWaitMs = setOutputDevice(outputDesc->mOutput1->mId, device, force, delayMs);
+        muteWaitMs += setOutputDevice(outputDesc->mOutput2->mId, device, force, delayMs);
+        return muteWaitMs;
+    }
+    // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
+    // output profile
+    if ((device != AUDIO_DEVICE_NONE) &&
+            ((device & outputDesc->mProfile->mSupportedDevices.types()) == 0)) {
+        return 0;
+    }
+
+    // filter devices according to output selected
+    device = (audio_devices_t)(device & outputDesc->mProfile->mSupportedDevices.types());
+
+    audio_devices_t prevDevice = outputDesc->mDevice;
+
+    ALOGV("setOutputDevice() prevDevice %04x", prevDevice);
+
+    if (device != AUDIO_DEVICE_NONE) {
+        outputDesc->mDevice = device;
+    }
+    // mute before routing so the device switch does not produce volume bursts
+    muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
+
+    // Do not change the routing if:
+    //  - the requested device is AUDIO_DEVICE_NONE
+    //  - the requested device is the same as current device and force is not specified.
+    // Doing this check here allows the caller to call setOutputDevice() without conditions
+    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force) {
+        ALOGV("setOutputDevice() setting same device %04x or null device for output %d", device, output);
+        return muteWaitMs;
+    }
+
+    ALOGV("setOutputDevice() changing device");
+    // do the routing
+    param.addInt(String8(AudioParameter::keyRouting), (int)device);
+    mpClientInterface->setParameters(output, param.toString(), delayMs);
+
+    // update stream volumes according to new device
+    applyStreamVolumes(output, device, delayMs);
+
+    return muteWaitMs;
+}
+
+AudioPolicyManager::IOProfile *AudioPolicyManager::getInputProfile(audio_devices_t device,
+                                                   uint32_t samplingRate,
+                                                   audio_format_t format,
+                                                   audio_channel_mask_t channelMask)
+{
+    // Scan every opened HW module and return the first input profile compatible
+    // with all requested capture parameters, or NULL when none matches.
+    for (size_t moduleIdx = 0; moduleIdx < mHwModules.size(); moduleIdx++) {
+        if (mHwModules[moduleIdx]->mHandle == 0) {
+            // module not opened: its profiles are not usable
+            continue;
+        }
+        const size_t profileCount = mHwModules[moduleIdx]->mInputProfiles.size();
+        for (size_t profileIdx = 0; profileIdx < profileCount; profileIdx++) {
+            IOProfile *candidate = mHwModules[moduleIdx]->mInputProfiles[profileIdx];
+            if (candidate->isCompatibleProfile(device, samplingRate, format,
+                                               channelMask, AUDIO_OUTPUT_FLAG_NONE)) {
+                return candidate;
+            }
+        }
+    }
+    return NULL;
+}
+
+audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
+{
+    // Select the capture device for an input source among the currently
+    // available input devices, by per-source priority order.
+    uint32_t device = AUDIO_DEVICE_NONE;
+    // strip the input direction bit to compare against device type masks
+    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() &
+                                            ~AUDIO_DEVICE_BIT_IN;
+    switch (inputSource) {
+    case AUDIO_SOURCE_VOICE_UPLINK:
+      // prefer the voice call uplink device; otherwise treat like a mic source
+      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
+          device = AUDIO_DEVICE_IN_VOICE_CALL;
+          break;
+      }
+      // FALL THROUGH
+
+    case AUDIO_SOURCE_DEFAULT:
+    case AUDIO_SOURCE_MIC:
+    case AUDIO_SOURCE_VOICE_RECOGNITION:
+    case AUDIO_SOURCE_HOTWORD:
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        // priority: forced BT SCO, then wired headset mic, then built-in mic
+        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
+                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        }
+        break;
+    case AUDIO_SOURCE_CAMCORDER:
+        // prefer the back mic (closer to the camera) over the built-in mic
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+            device = AUDIO_DEVICE_IN_BACK_MIC;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        }
+        break;
+    case AUDIO_SOURCE_VOICE_DOWNLINK:
+    case AUDIO_SOURCE_VOICE_CALL:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
+            device = AUDIO_DEVICE_IN_VOICE_CALL;
+        }
+        break;
+    case AUDIO_SOURCE_REMOTE_SUBMIX:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+        }
+        break;
+    default:
+        ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
+        break;
+    }
+    ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
+    return device;
+}
+
+bool AudioPolicyManager::isVirtualInputDevice(audio_devices_t device)
+{
+    // A device is "virtual" when it is a single input device whose type bit
+    // falls entirely inside the virtual input device mask.
+    if ((device & AUDIO_DEVICE_BIT_IN) == 0) {
+        // not an input device at all
+        return false;
+    }
+    device &= ~AUDIO_DEVICE_BIT_IN;
+    return (popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0);
+}
+
+audio_io_handle_t AudioPolicyManager::getActiveInput(bool ignoreVirtualInputs)
+{
+    // Return the handle of the first input that has active clients, optionally
+    // skipping inputs captured from virtual devices; 0 when none is active.
+    for (size_t idx = 0; idx < mInputs.size(); idx++) {
+        const AudioInputDescriptor *desc = mInputs.valueAt(idx);
+        if (desc->mRefCount == 0) {
+            continue;
+        }
+        if (ignoreVirtualInputs && isVirtualInputDevice(desc->mDevice)) {
+            continue;
+        }
+        return mInputs.keyAt(idx);
+    }
+    return 0;
+}
+
+
+audio_devices_t AudioPolicyManager::getDeviceForVolume(audio_devices_t device)
+{
+    // Reduce a possibly multi-bit routing selection to the single device used
+    // to pick a volume curve category.
+    if (device == AUDIO_DEVICE_NONE) {
+        // Forced route update with no active track: the category is irrelevant,
+        // so pick the speaker.
+        device = AUDIO_DEVICE_OUT_SPEAKER;
+    } else if (popcount(device) > 1) {
+        // Combinations are either speaker + another device (speaker wins), or
+        // an A2DP device + another device from a duplicated output (keep the
+        // A2DP device, as the other cannot be an active selection).
+        if (device & AUDIO_DEVICE_OUT_SPEAKER) {
+            device = AUDIO_DEVICE_OUT_SPEAKER;
+        } else {
+            device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
+        }
+    }
+
+    ALOGW_IF(popcount(device) != 1,
+            "getDeviceForVolume() invalid device combination: %08x",
+            device);
+
+    return device;
+}
+
+AudioPolicyManager::device_category AudioPolicyManager::getDeviceCategory(audio_devices_t device)
+{
+    // Map the volume-reference device (see getDeviceForVolume()) to one of the
+    // three volume curve categories. Anything unrecognized counts as speaker.
+    const audio_devices_t volDevice = getDeviceForVolume(device);
+
+    if (volDevice == AUDIO_DEVICE_OUT_EARPIECE) {
+        return DEVICE_CATEGORY_EARPIECE;
+    }
+    if (volDevice == AUDIO_DEVICE_OUT_WIRED_HEADSET ||
+            volDevice == AUDIO_DEVICE_OUT_WIRED_HEADPHONE ||
+            volDevice == AUDIO_DEVICE_OUT_BLUETOOTH_SCO ||
+            volDevice == AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET ||
+            volDevice == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP ||
+            volDevice == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES) {
+        return DEVICE_CATEGORY_HEADSET;
+    }
+    // speaker, carkit, A2DP speaker, HDMI, USB, remote submix and all others
+    return DEVICE_CATEGORY_SPEAKER;
+}
+
+float AudioPolicyManager::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
+        int indexInUi)
+{
+    // Convert a UI volume index into a linear amplification factor by
+    // piecewise-linear interpolation (in dB) over the stream's volume curve
+    // for the device's category. Returns 0.0 below the curve, 1.0 above it.
+    device_category deviceCategory = getDeviceCategory(device);
+    const VolumeCurvePoint *curve = streamDesc.mVolumeCurve[deviceCategory];
+
+    // the volume index in the UI is relative to the min and max volume indices for this stream type
+    int nbSteps = 1 + curve[VOLMAX].mIndex -
+            curve[VOLMIN].mIndex;
+    // rescale the UI index onto the curve's own index range
+    int volIdx = (nbSteps * (indexInUi - streamDesc.mIndexMin)) /
+            (streamDesc.mIndexMax - streamDesc.mIndexMin);
+
+    // find what part of the curve this index volume belongs to, or if it's out of bounds
+    int segment = 0;
+    if (volIdx < curve[VOLMIN].mIndex) {         // out of bounds
+        return 0.0f;
+    } else if (volIdx < curve[VOLKNEE1].mIndex) {
+        segment = 0;
+    } else if (volIdx < curve[VOLKNEE2].mIndex) {
+        segment = 1;
+    } else if (volIdx <= curve[VOLMAX].mIndex) {
+        segment = 2;
+    } else {                                                               // out of bounds
+        return 1.0f;
+    }
+
+    // linear interpolation in the attenuation table in dB
+    float decibels = curve[segment].mDBAttenuation +
+            ((float)(volIdx - curve[segment].mIndex)) *
+                ( (curve[segment+1].mDBAttenuation -
+                        curve[segment].mDBAttenuation) /
+                    ((float)(curve[segment+1].mIndex -
+                            curve[segment].mIndex)) );
+
+    // convert dB attenuation to a linear gain factor
+    float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
+
+    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f",
+            curve[segment].mIndex, volIdx,
+            curve[segment+1].mIndex,
+            curve[segment].mDBAttenuation,
+            decibels,
+            curve[segment+1].mDBAttenuation,
+            amplification);
+
+    return amplification;
+}
+
+// Volume curves: each curve is VOLCNT {index, dB attenuation} control points
+// (min, knee1, knee2, max) interpolated by volIndexToAmpl().
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sDefaultVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
+};
+
+// "Drc" variants apply more attenuation at low indices; installed by
+// initializeVolumeCurves() when the speaker path has dynamic range control.
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT] = {
+    {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
+};
+
+// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
+// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
+// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
+// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sDefaultSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sDefaultSystemVolumeCurveDrc[AudioPolicyManager::VOLCNT] = {
+    {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManager::VolumeCurvePoint
+    AudioPolicyManager::sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT] = {
+    {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
+};
+
+// Default curve table indexed by [stream type][device category]; copied into
+// per-stream descriptors by initializeVolumeCurves().
+const AudioPolicyManager::VolumeCurvePoint
+            *AudioPolicyManager::sVolumeProfiles[AUDIO_STREAM_CNT]
+                                                   [AudioPolicyManager::DEVICE_CATEGORY_CNT] = {
+    { // AUDIO_STREAM_VOICE_CALL
+        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultVoiceVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_SYSTEM
+        sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_RING
+        sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_MUSIC
+        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_ALARM
+        sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_NOTIFICATION
+        sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_BLUETOOTH_SCO
+        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultVoiceVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_ENFORCED_AUDIBLE
+        sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    {  // AUDIO_STREAM_DTMF
+        sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultSystemVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+    { // AUDIO_STREAM_TTS
+        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EARPIECE
+    },
+};
+
+void AudioPolicyManager::initializeVolumeCurves()
+{
+    // Install the default per-stream / per-device-category volume curves.
+    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+        for (int category = 0; category < DEVICE_CATEGORY_CNT; category++) {
+            mStreams[stream].mVolumeCurve[category] = sVolumeProfiles[stream][category];
+        }
+    }
+
+    // When DRC is available on the speaker path, substitute speaker curves
+    // tuned for it on the system and sonification streams.
+    if (!mSpeakerDrcEnabled) {
+        return;
+    }
+    mStreams[AUDIO_STREAM_SYSTEM].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
+            sDefaultSystemVolumeCurveDrc;
+    mStreams[AUDIO_STREAM_RING].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
+            sSpeakerSonificationVolumeCurveDrc;
+    mStreams[AUDIO_STREAM_ALARM].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
+            sSpeakerSonificationVolumeCurveDrc;
+    mStreams[AUDIO_STREAM_NOTIFICATION].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
+            sSpeakerSonificationVolumeCurveDrc;
+}
+
+// Converts a stream volume index into a linear amplitude for the given output
+// and device, applying policy rules: max volume on digital outputs for music,
+// and headset attenuation/capping for sonification streams.
+// device == AUDIO_DEVICE_NONE means "use the output's current device".
+float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
+                                            int index,
+                                            audio_io_handle_t output,
+                                            audio_devices_t device)
+{
+    float volume = 1.0;
+    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+    StreamDescriptor &streamDesc = mStreams[stream];
+
+    if (device == AUDIO_DEVICE_NONE) {
+        device = outputDesc->device();
+    }
+
+    // if volume is not 0 (not muted), force media volume to max on digital output
+    if (stream == AUDIO_STREAM_MUSIC &&
+        index != mStreams[stream].mIndexMin &&
+        (device == AUDIO_DEVICE_OUT_AUX_DIGITAL ||
+         device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET ||
+         device == AUDIO_DEVICE_OUT_USB_ACCESSORY ||
+         device == AUDIO_DEVICE_OUT_USB_DEVICE)) {
+        return 1.0;
+    }
+
+    volume = volIndexToAmpl(device, streamDesc, index);
+
+    // if a headset is connected, apply the following rules to ring tones and notifications
+    // to avoid sound level bursts in user's ears:
+    // - always attenuate ring tones and notifications volume by 6dB
+    // - if music is playing, always limit the volume to current music volume,
+    // with a minimum threshold at -36dB so that notification is always perceived.
+    const routing_strategy stream_strategy = getStrategy(stream);
+    if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
+            AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+            AUDIO_DEVICE_OUT_WIRED_HEADSET |
+            AUDIO_DEVICE_OUT_WIRED_HEADPHONE)) &&
+        ((stream_strategy == STRATEGY_SONIFICATION)
+                || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
+                || (stream == AUDIO_STREAM_SYSTEM)
+                || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) &&
+        streamDesc.mCanBeMuted) {
+        volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
+        // when the phone is ringing we must consider that music could have been paused just before
+        // by the music application and behave as if music was active if the last music track was
+        // just stopped
+        if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
+                mLimitRingtoneVolume) {
+            audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
+            // recursive call: compute the current music amplitude on this output
+            // so the sonification volume can be capped to it (floor at
+            // SONIFICATION_HEADSET_VOLUME_MIN so it stays audible)
+            float musicVol = computeVolume(AUDIO_STREAM_MUSIC,
+                               mStreams[AUDIO_STREAM_MUSIC].getVolumeIndex(musicDevice),
+                               output,
+                               musicDevice);
+            float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ?
+                                musicVol : SONIFICATION_HEADSET_VOLUME_MIN;
+            if (volume > minVol) {
+                volume = minVol;
+                ALOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol);
+            }
+        }
+    }
+
+    return volume;
+}
+
+// Applies the volume for "stream" on "output" unless the stream is muted or
+// the in-call/BT-SCO force-use state forbids it. Only pushes a new value to
+// AudioFlinger when the computed amplitude changed or "force" is set; also
+// keeps the HAL voice volume in sync for voice-call/SCO streams.
+// Returns NO_ERROR, or INVALID_OPERATION when blocked by the SCO rule.
+status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
+                                                   int index,
+                                                   audio_io_handle_t output,
+                                                   audio_devices_t device,
+                                                   int delayMs,
+                                                   bool force)
+{
+
+    // do not change actual stream volume if the stream is muted
+    if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) {
+        ALOGVV("checkAndSetVolume() stream %d muted count %d",
+              stream, mOutputs.valueFor(output)->mMuteCount[stream]);
+        return NO_ERROR;
+    }
+
+    // do not change in call volume if bluetooth is connected and vice versa
+    if ((stream == AUDIO_STREAM_VOICE_CALL &&
+            mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
+        (stream == AUDIO_STREAM_BLUETOOTH_SCO &&
+                mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO)) {
+        ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
+             stream, mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]);
+        return INVALID_OPERATION;
+    }
+
+    float volume = computeVolume(stream, index, output, device);
+    // We actually change the volume if:
+    // - the float value returned by computeVolume() changed
+    // - the force flag is set
+    if (volume != mOutputs.valueFor(output)->mCurVolume[stream] ||
+            force) {
+        mOutputs.valueFor(output)->mCurVolume[stream] = volume;
+        ALOGVV("checkAndSetVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs);
+        // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
+        // enabled
+        if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+            mpClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volume, output, delayMs);
+        }
+        mpClientInterface->setStreamVolume(stream, volume, output, delayMs);
+    }
+
+    if (stream == AUDIO_STREAM_VOICE_CALL ||
+        stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+        float voiceVolume;
+        // Force voice volume to max for bluetooth SCO as volume is managed by the headset
+        if (stream == AUDIO_STREAM_VOICE_CALL) {
+            voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
+        } else {
+            voiceVolume = 1.0;
+        }
+
+        // only the primary output drives the HAL voice volume, and only on change
+        if (voiceVolume != mLastVoiceVolume && output == mPrimaryOutput) {
+            mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
+            mLastVoiceVolume = voiceVolume;
+        }
+    }
+
+    return NO_ERROR;
+}
+
+// Re-applies the current volume index of every stream type on "output" for
+// "device" (e.g. after a routing change). delayMs/force are forwarded to
+// checkAndSetVolume().
+void AudioPolicyManager::applyStreamVolumes(audio_io_handle_t output,
+                                                audio_devices_t device,
+                                                int delayMs,
+                                                bool force)
+{
+    ALOGVV("applyStreamVolumes() for output %d and device %x", output, device);
+
+    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+        checkAndSetVolume((audio_stream_type_t)stream,
+                          mStreams[stream].getVolumeIndex(device),
+                          output,
+                          device,
+                          delayMs,
+                          force);
+    }
+}
+
+// Mutes or unmutes every stream type that maps to "strategy" on the given
+// output by delegating to setStreamMute().
+void AudioPolicyManager::setStrategyMute(routing_strategy strategy,
+                                             bool on,
+                                             audio_io_handle_t output,
+                                             int delayMs,
+                                             audio_devices_t device)
+{
+    ALOGVV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output);
+    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+        if (getStrategy((audio_stream_type_t)stream) == strategy) {
+            setStreamMute((audio_stream_type_t)stream, on, output, delayMs, device);
+        }
+    }
+}
+
+// Mutes/unmutes "stream" on "output" with reference counting: the volume is
+// forced to index 0 only on the first mute, and restored only when the last
+// mute is released. device == AUDIO_DEVICE_NONE means the output's current
+// device.
+void AudioPolicyManager::setStreamMute(audio_stream_type_t stream,
+                                           bool on,
+                                           audio_io_handle_t output,
+                                           int delayMs,
+                                           audio_devices_t device)
+{
+    StreamDescriptor &streamDesc = mStreams[stream];
+    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+    if (device == AUDIO_DEVICE_NONE) {
+        device = outputDesc->device();
+    }
+
+    ALOGVV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d device %04x",
+          stream, on, output, outputDesc->mMuteCount[stream], device);
+
+    if (on) {
+        if (outputDesc->mMuteCount[stream] == 0) {
+            // ENFORCED_AUDIBLE is only mutable when not forced as system sound
+            if (streamDesc.mCanBeMuted &&
+                    ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
+                     (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) {
+                checkAndSetVolume(stream, 0, output, device, delayMs);
+            }
+        }
+        // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
+        outputDesc->mMuteCount[stream]++;
+    } else {
+        if (outputDesc->mMuteCount[stream] == 0) {
+            ALOGV("setStreamMute() unmuting non muted stream!");
+            return;
+        }
+        if (--outputDesc->mMuteCount[stream] == 0) {
+            checkAndSetVolume(stream,
+                              streamDesc.getVolumeIndex(device),
+                              output,
+                              device,
+                              delayMs);
+        }
+    }
+}
+
+// While in call, mutes low-visibility sonification streams on the primary
+// output and replaces high-visibility ones with an in-call notification tone
+// when they would play on the same device as the call.
+// starting: true when the sound starts, false when it stops.
+// stateChange: true when called from setPhoneState(), in which case the
+// mute/unmute must be repeated once per active track on the output.
+void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
+                                                      bool starting, bool stateChange)
+{
+    // if the stream pertains to sonification strategy and we are in call we must
+    // mute the stream if it is low visibility. If it is high visibility, we must play a tone
+    // in the device used for phone strategy and play the tone if the selected device does not
+    // interfere with the device used for phone strategy
+    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
+    // many times as there are active tracks on the output
+    const routing_strategy stream_strategy = getStrategy(stream);
+    if ((stream_strategy == STRATEGY_SONIFICATION) ||
+            ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
+        AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mPrimaryOutput);
+        ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
+                stream, starting, outputDesc->mDevice, stateChange);
+        if (outputDesc->mRefCount[stream]) {
+            int muteCount = 1;
+            if (stateChange) {
+                muteCount = outputDesc->mRefCount[stream];
+            }
+            if (audio_is_low_visibility(stream)) {
+                ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
+                for (int i = 0; i < muteCount; i++) {
+                    setStreamMute(stream, starting, mPrimaryOutput);
+                }
+            } else {
+                ALOGV("handleIncallSonification() high visibility");
+                // mute only if the stream's device overlaps the phone device
+                if (outputDesc->device() &
+                        getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
+                    ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
+                    for (int i = 0; i < muteCount; i++) {
+                        setStreamMute(stream, starting, mPrimaryOutput);
+                    }
+                }
+                if (starting) {
+                    mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
+                                                 AUDIO_STREAM_VOICE_CALL);
+                } else {
+                    mpClientInterface->stopTone();
+                }
+            }
+        }
+    }
+}
+
+// Returns true if the current phone state counts as "in call".
+bool AudioPolicyManager::isInCall()
+{
+    return isStateInCall(mPhoneState);
+}
+
+// True for both telephony calls and VoIP communication modes.
+bool AudioPolicyManager::isStateInCall(int state) {
+    return ((state == AUDIO_MODE_IN_CALL) ||
+            (state == AUDIO_MODE_IN_COMMUNICATION));
+}
+
+// Maximum CPU load allowed for audio effects, in MIPS (policy constant).
+uint32_t AudioPolicyManager::getMaxEffectsCpuLoad()
+{
+    return MAX_EFFECTS_CPU_LOAD;
+}
+
+// Maximum memory allowed for audio effects, in KB (policy constant).
+uint32_t AudioPolicyManager::getMaxEffectsMemory()
+{
+    return MAX_EFFECTS_MEMORY;
+}
+
+// --- AudioOutputDescriptor class implementation
+
+// Builds an output descriptor, zeroing all per-stream counters and seeding
+// format/rate/channel/flags from the first entries of the IOProfile when one
+// is supplied (profile may be NULL for duplicated outputs).
+AudioPolicyManager::AudioOutputDescriptor::AudioOutputDescriptor(
+        const IOProfile *profile)
+    : mId(0), mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT),
+      mChannelMask(0), mLatency(0),
+    mFlags((audio_output_flags_t)0), mDevice(AUDIO_DEVICE_NONE),
+    mOutput1(0), mOutput2(0), mProfile(profile), mDirectOpenCount(0)
+{
+    // clear usage count for all stream types
+    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+        mRefCount[i] = 0;
+        mCurVolume[i] = -1.0;  // -1 == "never set", so first volume always applies
+        mMuteCount[i] = 0;
+        mStopTime[i] = 0;
+    }
+    for (int i = 0; i < NUM_STRATEGIES; i++) {
+        mStrategyMutedByDevice[i] = false;
+    }
+    if (profile != NULL) {
+        mSamplingRate = profile->mSamplingRates[0];
+        mFormat = profile->mFormats[0];
+        mChannelMask = profile->mChannelMasks[0];
+        mFlags = profile->mFlags;
+    }
+}
+
+// Current device(s) of this output; for a duplicated output this is the union
+// of both sub-outputs' devices.
+audio_devices_t AudioPolicyManager::AudioOutputDescriptor::device() const
+{
+    if (isDuplicated()) {
+        return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice);
+    } else {
+        return mDevice;
+    }
+}
+
+// Latency of this output; a duplicated output reports the worst (largest)
+// of its two sub-outputs.
+uint32_t AudioPolicyManager::AudioOutputDescriptor::latency()
+{
+    if (isDuplicated()) {
+        return (mOutput1->mLatency > mOutput2->mLatency) ? mOutput1->mLatency : mOutput2->mLatency;
+    } else {
+        return mLatency;
+    }
+}
+
+// True when both outputs are backed by the same HW module; recurses through
+// duplicated outputs on either side.
+bool AudioPolicyManager::AudioOutputDescriptor::sharesHwModuleWith(
+        const AudioOutputDescriptor *outputDesc)
+{
+    if (isDuplicated()) {
+        return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc);
+    } else if (outputDesc->isDuplicated()){
+        return sharesHwModuleWith(outputDesc->mOutput1) || sharesHwModuleWith(outputDesc->mOutput2);
+    } else {
+        return (mProfile->mModule == outputDesc->mProfile->mModule);
+    }
+}
+
+// Adjusts the active-track count for "stream" by "delta" (may be negative).
+// The change is forwarded to both sub-outputs of a duplicated output first;
+// a delta that would drive the local count negative is clamped to 0 with a
+// warning instead of underflowing.
+void AudioPolicyManager::AudioOutputDescriptor::changeRefCount(audio_stream_type_t stream,
+                                                                   int delta)
+{
+    // forward usage count change to attached outputs
+    if (isDuplicated()) {
+        mOutput1->changeRefCount(stream, delta);
+        mOutput2->changeRefCount(stream, delta);
+    }
+    if ((delta + (int)mRefCount[stream]) < 0) {
+        ALOGW("changeRefCount() invalid delta %d for stream %d, refCount %d",
+              delta, stream, mRefCount[stream]);
+        mRefCount[stream] = 0;
+        return;
+    }
+    mRefCount[stream] += delta;
+    ALOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]);
+}
+
+// Device types this output can route to: the profile's supported set, or the
+// union of both sub-outputs for a duplicated output.
+audio_devices_t AudioPolicyManager::AudioOutputDescriptor::supportedDevices()
+{
+    if (isDuplicated()) {
+        return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices());
+    } else {
+        return mProfile->mSupportedDevices.types() ;
+    }
+}
+
+// True if any stream was active on this output within the past inPastMs
+// (NUM_STRATEGIES acts as the "any strategy" wildcard in isStrategyActive()).
+bool AudioPolicyManager::AudioOutputDescriptor::isActive(uint32_t inPastMs) const
+{
+    return isStrategyActive(NUM_STRATEGIES, inPastMs);
+}
+
+// True if any stream mapped to "strategy" is (or recently was) active on this
+// output. Passing NUM_STRATEGIES matches every strategy. sysTime == 0 means
+// "sample the clock now" (only needed when inPastMs != 0).
+bool AudioPolicyManager::AudioOutputDescriptor::isStrategyActive(routing_strategy strategy,
+                                                                       uint32_t inPastMs,
+                                                                       nsecs_t sysTime) const
+{
+    if ((sysTime == 0) && (inPastMs != 0)) {
+        sysTime = systemTime();
+    }
+    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
+        if (((getStrategy((audio_stream_type_t)i) == strategy) ||
+                (NUM_STRATEGIES == strategy)) &&
+                isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// True if "stream" has active tracks on this output, or stopped less than
+// inPastMs milliseconds before sysTime (0 == now).
+bool AudioPolicyManager::AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
+                                                                       uint32_t inPastMs,
+                                                                       nsecs_t sysTime) const
+{
+    if (mRefCount[stream] != 0) {
+        return true;
+    }
+    if (inPastMs == 0) {
+        return false;
+    }
+    if (sysTime == 0) {
+        sysTime = systemTime();
+    }
+    if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) {
+        return true;
+    }
+    return false;
+}
+
+
+// Writes a human-readable summary of this output (format, devices, per-stream
+// volume/refcount/mute state) to fd, for dumpsys.
+status_t AudioPolicyManager::AudioOutputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Format: %08x\n", mFormat);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Latency: %d\n", mLatency);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Flags %08x\n", mFlags);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Devices %08x\n", device());
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Stream volume refCount muteCount\n");
+    result.append(buffer);
+    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
+        snprintf(buffer, SIZE, " %02d     %.03f     %02d       %02d\n",
+                 i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
+        result.append(buffer);
+    }
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+// --- AudioInputDescriptor class implementation
+
+// Builds an input descriptor, seeding rate/format/channels from the first
+// entries of the IOProfile when one is supplied.
+AudioPolicyManager::AudioInputDescriptor::AudioInputDescriptor(const IOProfile *profile)
+    : mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(0),
+      mDevice(AUDIO_DEVICE_NONE), mRefCount(0),
+      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile)
+{
+    if (profile != NULL) {
+        mSamplingRate = profile->mSamplingRates[0];
+        mFormat = profile->mFormats[0];
+        mChannelMask = profile->mChannelMasks[0];
+    }
+}
+
+// Writes a human-readable summary of this input to fd, for dumpsys.
+status_t AudioPolicyManager::AudioInputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Format: %d\n", mFormat);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+// --- StreamDescriptor class implementation
+
+// Default stream descriptor: index range [0,1], mutable, with a guaranteed
+// entry for AUDIO_DEVICE_OUT_DEFAULT (relied upon by getVolumeIndex()).
+AudioPolicyManager::StreamDescriptor::StreamDescriptor()
+    :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
+{
+    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
+}
+
+// Returns the stored volume index for the volume-curve device group of
+// "device", falling back to AUDIO_DEVICE_OUT_DEFAULT when no per-device
+// index was set.
+int AudioPolicyManager::StreamDescriptor::getVolumeIndex(audio_devices_t device)
+{
+    device = AudioPolicyManager::getDeviceForVolume(device);
+    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT
+    if (mIndexCur.indexOfKey(device) < 0) {
+        device = AUDIO_DEVICE_OUT_DEFAULT;
+    }
+    return mIndexCur.valueFor(device);
+}
+
+// Writes one dump row: mutability, index range, then each per-device index
+// as "device : index" pairs.
+void AudioPolicyManager::StreamDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%s         %02d         %02d         ",
+             mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+    result.append(buffer);
+    for (size_t i = 0; i < mIndexCur.size(); i++) {
+        snprintf(buffer, SIZE, "%04x : %02d, ",
+                 mIndexCur.keyAt(i),
+                 mIndexCur.valueAt(i));
+        result.append(buffer);
+    }
+    result.append("\n");
+
+    write(fd, result.string(), result.size());
+}
+
+// --- EffectDescriptor class implementation
+
+// Writes a human-readable summary of this registered effect to fd.
+status_t AudioPolicyManager::EffectDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " I/O: %d\n", mIo);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Session: %d\n", mSession);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Name: %s\n",  mDesc.name);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " %s\n",  mEnabled ? "Enabled" : "Disabled");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+// --- HwModule class implementation
+
+// Copies (strndup, bounded by AUDIO_HARDWARE_MODULE_ID_MAX_LEN) the module
+// name; mName is released in ~HwModule().
+AudioPolicyManager::HwModule::HwModule(const char *name)
+    : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)), mHandle(0)
+{
+}
+
+// Deletes all owned I/O profiles (clearing their device lists first) and
+// frees the strndup'ed module name.
+AudioPolicyManager::HwModule::~HwModule()
+{
+    for (size_t i = 0; i < mOutputProfiles.size(); i++) {
+        mOutputProfiles[i]->mSupportedDevices.clear();
+        delete mOutputProfiles[i];
+    }
+    for (size_t i = 0; i < mInputProfiles.size(); i++) {
+        mInputProfiles[i]->mSupportedDevices.clear();
+        delete mInputProfiles[i];
+    }
+    free((void *)mName);
+}
+
+// Writes this HW module's name, handle and every attached output/input
+// profile to fd, for dumpsys.
+void AudioPolicyManager::HwModule::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "  - name: %s\n", mName);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "  - handle: %d\n", mHandle);
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    if (mOutputProfiles.size()) {
+        write(fd, "  - outputs:\n", strlen("  - outputs:\n"));
+        for (size_t i = 0; i < mOutputProfiles.size(); i++) {
+            // %zu: "i" is a size_t; %d mismatches the argument width on LP64
+            snprintf(buffer, SIZE, "    output %zu:\n", i);
+            write(fd, buffer, strlen(buffer));
+            mOutputProfiles[i]->dump(fd);
+        }
+    }
+    if (mInputProfiles.size()) {
+        write(fd, "  - inputs:\n", strlen("  - inputs:\n"));
+        for (size_t i = 0; i < mInputProfiles.size(); i++) {
+            snprintf(buffer, SIZE, "    input %zu:\n", i);
+            write(fd, buffer, strlen(buffer));
+            mInputProfiles[i]->dump(fd);
+        }
+    }
+}
+
+// An IOProfile belongs to (and is non-owningly linked back to) its HwModule.
+AudioPolicyManager::IOProfile::IOProfile(HwModule *module)
+    : mFlags((audio_output_flags_t)0), mModule(module)
+{
+}
+
+// No owned resources beyond the member containers, which clean up themselves.
+AudioPolicyManager::IOProfile::~IOProfile()
+{
+}
+
+// checks if the IO profile is compatible with specified parameters.
+// Sampling rate, format and channel mask must be specified in order to
+// get a valid a match
+// Compatibility requires: all requested devices and flags are supported
+// (superset match on the bit fields), and the exact rate, format and channel
+// mask each appear in the profile's supported lists.
+bool AudioPolicyManager::IOProfile::isCompatibleProfile(audio_devices_t device,
+                                                            uint32_t samplingRate,
+                                                            audio_format_t format,
+                                                            audio_channel_mask_t channelMask,
+                                                            audio_output_flags_t flags) const
+{
+    if (samplingRate == 0 || !audio_is_valid_format(format) || channelMask == 0) {
+         return false;
+     }
+
+     if ((mSupportedDevices.types() & device) != device) {
+         return false;
+     }
+     if ((mFlags & flags) != flags) {
+         return false;
+     }
+     size_t i;
+     for (i = 0; i < mSamplingRates.size(); i++)
+     {
+         if (mSamplingRates[i] == samplingRate) {
+             break;
+         }
+     }
+     if (i == mSamplingRates.size()) {
+         return false;
+     }
+     for (i = 0; i < mFormats.size(); i++)
+     {
+         if (mFormats[i] == format) {
+             break;
+         }
+     }
+     if (i == mFormats.size()) {
+         return false;
+     }
+     for (i = 0; i < mChannelMasks.size(); i++)
+     {
+         if (mChannelMasks[i] == channelMask) {
+             break;
+         }
+     }
+     if (i == mChannelMasks.size()) {
+         return false;
+     }
+     return true;
+}
+
+// Writes the profile's supported rates, channel masks, formats, devices and
+// flags to fd, for dumpsys.
+void AudioPolicyManager::IOProfile::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "    - sampling rates: ");
+    result.append(buffer);
+    for (size_t i = 0; i < mSamplingRates.size(); i++) {
+        snprintf(buffer, SIZE, "%d", mSamplingRates[i]);
+        result.append(buffer);
+        result.append(i == (mSamplingRates.size() - 1) ? "\n" : ", ");
+    }
+
+    snprintf(buffer, SIZE, "    - channel masks: ");
+    result.append(buffer);
+    for (size_t i = 0; i < mChannelMasks.size(); i++) {
+        snprintf(buffer, SIZE, "0x%04x", mChannelMasks[i]);
+        result.append(buffer);
+        result.append(i == (mChannelMasks.size() - 1) ? "\n" : ", ");
+    }
+
+    snprintf(buffer, SIZE, "    - formats: ");
+    result.append(buffer);
+    for (size_t i = 0; i < mFormats.size(); i++) {
+        snprintf(buffer, SIZE, "0x%08x", mFormats[i]);
+        result.append(buffer);
+        result.append(i == (mFormats.size() - 1) ? "\n" : ", ");
+    }
+
+    snprintf(buffer, SIZE, "    - devices:\n");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    DeviceDescriptor::dumpHeader(fd, 6);
+    for (size_t i = 0; i < mSupportedDevices.size(); i++) {
+        mSupportedDevices[i]->dump(fd, 6);
+    }
+
+    // "result" was already flushed above; write the flags line directly
+    // instead of appending to "result" and writing it a second time, which
+    // would duplicate everything printed so far in the dump output.
+    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", mFlags);
+    write(fd, buffer, strlen(buffer));
+}
+
+// --- DeviceDescriptor implementation
+
+// Equality used by DeviceVector: same type, and address/channel mask either
+// match or are unspecified on one side (wildcard semantics).
+bool AudioPolicyManager::DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
+{
+    // Devices are considered equal if they:
+    // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
+    // - have the same address or one device does not specify the address
+    // - have the same channel mask or one device does not specify the channel mask
+    return (mType == other->mType) &&
+           (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
+           (mChannelMask == 0 || other->mChannelMask == 0 ||
+                mChannelMask == other->mChannelMask);
+}
+
+// Recomputes the cached OR-ed device type mask after an add/remove, so
+// types() stays consistent with the vector contents.
+void AudioPolicyManager::DeviceVector::refreshTypes()
+{
+    mTypes = AUDIO_DEVICE_NONE;
+    for(size_t i = 0; i < size(); i++) {
+        mTypes |= itemAt(i)->mType;
+    }
+    ALOGV("DeviceVector::refreshTypes() mTypes %08x", mTypes);
+}
+
+// Linear search using DeviceDescriptor::equals(); returns the index or -1.
+ssize_t AudioPolicyManager::DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
+{
+    for(size_t i = 0; i < size(); i++) {
+        if (item->equals(itemAt(i))) {
+            return i;
+        }
+    }
+    return -1;
+}
+
+// Adds "item" if no equal device is already present, refreshing the cached
+// type mask on success. Returns the insertion index, or -1 on duplicate.
+ssize_t AudioPolicyManager::DeviceVector::add(const sp<DeviceDescriptor>& item)
+{
+    ssize_t ret = indexOf(item);
+
+    if (ret < 0) {
+        ret = SortedVector::add(item);
+        if (ret >= 0) {
+            refreshTypes();
+        }
+    } else {
+        ALOGW("DeviceVector::add device %08x already in", item->mType);
+        ret = -1;
+    }
+    return ret;
+}
+
+// Removes the device equal to "item" (per DeviceDescriptor::equals()) and
+// refreshes the cached type mask. Returns removeAt()'s result, or the
+// negative indexOf() result when the device was not present.
+ssize_t AudioPolicyManager::DeviceVector::remove(const sp<DeviceDescriptor>& item)
+{
+    // (removed an unused local "size_t i;" left over from an earlier version)
+    ssize_t ret = indexOf(item);
+
+    if (ret < 0) {
+        ALOGW("DeviceVector::remove device %08x not in", item->mType);
+    } else {
+        ret = SortedVector::removeAt(ret);
+        if (ret >= 0) {
+            refreshTypes();
+        }
+    }
+    return ret;
+}
+
+// Expands a combined device type bit field into one DeviceDescriptor per set
+// bit, preserving the AUDIO_DEVICE_BIT_IN role bit on each created device.
+void AudioPolicyManager::DeviceVector::loadDevicesFromType(audio_devices_t types)
+{
+    // (removed an unused local "DeviceVector deviceList;")
+    uint32_t role_bit = AUDIO_DEVICE_BIT_IN & types;
+    types &= ~role_bit;
+
+    while (types) {
+        // peel off the highest set bit each iteration
+        uint32_t i = 31 - __builtin_clz(types);
+        uint32_t type = 1 << i;
+        types &= ~type;
+        add(new DeviceDescriptor(type | role_bit));
+    }
+}
+
+// Writes the column header matching DeviceDescriptor::dump(), indented by
+// "spaces" columns.
+void AudioPolicyManager::DeviceDescriptor::dumpHeader(int fd, int spaces)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "%*s%-48s %-2s %-8s %-32s \n",
+                         spaces, "", "Type", "ID", "Cnl Mask", "Address");
+    write(fd, buffer, strlen(buffer));
+}
+
+// Writes one device row (type name, id, channel mask, address) aligned with
+// the columns emitted by dumpHeader().
+status_t AudioPolicyManager::DeviceDescriptor::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "%*s%-48s %2d %08x %-32s \n",
+                         spaces, "",
+                         enumToString(sDeviceNameToEnumTable,
+                                      ARRAY_SIZE(sDeviceNameToEnumTable),
+                                      mType),
+                         mId, mChannelMask, mAddress.string());
+    write(fd, buffer, strlen(buffer));
+
+    return NO_ERROR;
+}
+
+
+// --- audio_policy.conf file parsing
+
+// Parses a "|"-separated list of flag names from audio_policy.conf into an
+// audio_output_flags_t bit mask. Note: strtok() modifies "name" in place and
+// is not reentrant; config parsing is single-threaded here.
+audio_output_flags_t AudioPolicyManager::parseFlagNames(char *name)
+{
+    uint32_t flag = 0;
+
+    // it is OK to cast name to non const here as we are not going to use it after
+    // strtok() modifies it
+    char *flagName = strtok(name, "|");
+    while (flagName != NULL) {
+        if (strlen(flagName) != 0) {
+            flag |= stringToEnum(sFlagNameToEnumTable,
+                               ARRAY_SIZE(sFlagNameToEnumTable),
+                               flagName);
+        }
+        flagName = strtok(NULL, "|");
+    }
+    //force direct flag if offload flag is set: offloading implies a direct output stream
+    // and all common behaviors are driven by checking only the direct flag
+    // this should normally be set appropriately in the policy configuration file
+    if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flag |= AUDIO_OUTPUT_FLAG_DIRECT;
+    }
+
+    return (audio_output_flags_t)flag;
+}
+
+// Parses a "|"-separated list of device names from audio_policy.conf into a
+// combined audio_devices_t bit mask (unknown names contribute 0).
+audio_devices_t AudioPolicyManager::parseDeviceNames(char *name)
+{
+    uint32_t device = 0;
+
+    char *devName = strtok(name, "|");
+    while (devName != NULL) {
+        if (strlen(devName) != 0) {
+            device |= stringToEnum(sDeviceNameToEnumTable,
+                                 ARRAY_SIZE(sDeviceNameToEnumTable),
+                                 devName);
+         }
+        devName = strtok(NULL, "|");
+     }
+    return device;
+}
+
+// Parses a "|"-separated sampling-rate list into profile->mSamplingRates.
+// The DYNAMIC_VALUE_TAG sentinel stores a single 0 entry instead (rates are
+// then queried from the stream at first open); non-numeric tokens are skipped.
+void AudioPolicyManager::loadSamplingRates(char *name, IOProfile *profile)
+{
+    char *str = strtok(name, "|");
+
+    // by convention, "0' in the first entry in mSamplingRates indicates the supported sampling
+    // rates should be read from the output stream after it is opened for the first time
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        profile->mSamplingRates.add(0);
+        return;
+    }
+
+    while (str != NULL) {
+        uint32_t rate = atoi(str);
+        if (rate != 0) {
+            ALOGV("loadSamplingRates() adding rate %d", rate);
+            profile->mSamplingRates.add(rate);
+        }
+        str = strtok(NULL, "|");
+    }
+    return;
+}
+
+// Parses a "|"-separated format-name list into profile->mFormats. The
+// DYNAMIC_VALUE_TAG sentinel stores AUDIO_FORMAT_DEFAULT instead (formats are
+// then queried from the stream at first open); unknown names are skipped.
+void AudioPolicyManager::loadFormats(char *name, IOProfile *profile)
+{
+    char *str = strtok(name, "|");
+
+    // by convention, "0' in the first entry in mFormats indicates the supported formats
+    // should be read from the output stream after it is opened for the first time
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
+        return;
+    }
+
+    while (str != NULL) {
+        audio_format_t format = (audio_format_t)stringToEnum(sFormatNameToEnumTable,
+                                                             ARRAY_SIZE(sFormatNameToEnumTable),
+                                                             str);
+        if (format != AUDIO_FORMAT_DEFAULT) {
+            profile->mFormats.add(format);
+        }
+        str = strtok(NULL, "|");
+    }
+    return;
+}
+
+// Parses a "|"-separated input channel-mask-name list into
+// profile->mChannelMasks, using the input-channel name table. The
+// DYNAMIC_VALUE_TAG sentinel stores a single 0 entry; unknown names skipped.
+void AudioPolicyManager::loadInChannels(char *name, IOProfile *profile)
+{
+    const char *str = strtok(name, "|");
+
+    ALOGV("loadInChannels() %s", name);
+
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        profile->mChannelMasks.add(0);
+        return;
+    }
+
+    while (str != NULL) {
+        audio_channel_mask_t channelMask =
+                (audio_channel_mask_t)stringToEnum(sInChannelsNameToEnumTable,
+                                                   ARRAY_SIZE(sInChannelsNameToEnumTable),
+                                                   str);
+        if (channelMask != 0) {
+            ALOGV("loadInChannels() adding channelMask %04x", channelMask);
+            profile->mChannelMasks.add(channelMask);
+        }
+        str = strtok(NULL, "|");
+    }
+    return;
+}
+
+void AudioPolicyManager::loadOutChannels(char *name, IOProfile *profile)
+{
+    const char *str = strtok(name, "|");
+
+    ALOGV("loadOutChannels() %s", name);
+
+    // by convention, "0" in the first entry in mChannelMasks indicates the supported channel
+    // masks should be read from the output stream after it is opened for the first time
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        profile->mChannelMasks.add(0);
+        return;
+    }
+
+    while (str != NULL) {
+        audio_channel_mask_t channelMask =
+                (audio_channel_mask_t)stringToEnum(sOutChannelsNameToEnumTable,
+                                                   ARRAY_SIZE(sOutChannelsNameToEnumTable),
+                                                   str);
+        if (channelMask != 0) {
+            profile->mChannelMasks.add(channelMask);
+        }
+        str = strtok(NULL, "|");
+    }
+    return;
+}
+
+status_t AudioPolicyManager::loadInput(cnode *root, HwModule *module)
+{
+    cnode *node = root->first_child;
+
+    IOProfile *profile = new IOProfile(module);
+
+    while (node) {
+        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
+            loadSamplingRates((char *)node->value, profile);
+        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
+            loadFormats((char *)node->value, profile);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
+            loadInChannels((char *)node->value, profile);
+        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
+            profile->mSupportedDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
+        }
+        node = node->next;
+    }
+    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
+            "loadInput() invalid supported devices");
+    ALOGW_IF(profile->mChannelMasks.size() == 0,
+            "loadInput() invalid supported channel masks");
+    ALOGW_IF(profile->mSamplingRates.size() == 0,
+            "loadInput() invalid supported sampling rates");
+    ALOGW_IF(profile->mFormats.size() == 0,
+            "loadInput() invalid supported formats");
+    if (!profile->mSupportedDevices.isEmpty() &&
+            (profile->mChannelMasks.size() != 0) &&
+            (profile->mSamplingRates.size() != 0) &&
+            (profile->mFormats.size() != 0)) {
+
+        ALOGV("loadInput() adding input Supported Devices %04x",
+              profile->mSupportedDevices.types());
+
+        module->mInputProfiles.add(profile);
+        return NO_ERROR;
+    } else {
+        delete profile;
+        return BAD_VALUE;
+    }
+}
+
+status_t AudioPolicyManager::loadOutput(cnode *root, HwModule *module)
+{
+    cnode *node = root->first_child;
+
+    IOProfile *profile = new IOProfile(module);
+
+    while (node) {
+        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
+            loadSamplingRates((char *)node->value, profile);
+        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
+            loadFormats((char *)node->value, profile);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
+            loadOutChannels((char *)node->value, profile);
+        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
+            profile->mSupportedDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
+        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
+            profile->mFlags = parseFlagNames((char *)node->value);
+        }
+        node = node->next;
+    }
+    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
+            "loadOutput() invalid supported devices");
+    ALOGW_IF(profile->mChannelMasks.size() == 0,
+            "loadOutput() invalid supported channel masks");
+    ALOGW_IF(profile->mSamplingRates.size() == 0,
+            "loadOutput() invalid supported sampling rates");
+    ALOGW_IF(profile->mFormats.size() == 0,
+            "loadOutput() invalid supported formats");
+    if (!profile->mSupportedDevices.isEmpty() &&
+            (profile->mChannelMasks.size() != 0) &&
+            (profile->mSamplingRates.size() != 0) &&
+            (profile->mFormats.size() != 0)) {
+
+        ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x",
+              profile->mSupportedDevices.types(), profile->mFlags);
+
+        module->mOutputProfiles.add(profile);
+        return NO_ERROR;
+    } else {
+        delete profile;
+        return BAD_VALUE;
+    }
+}
+
+void AudioPolicyManager::loadHwModule(cnode *root)
+{
+    cnode *node = config_find(root, OUTPUTS_TAG);
+    status_t status = NAME_NOT_FOUND;
+
+    HwModule *module = new HwModule(root->name);
+
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading output %s", node->name);
+            status_t tmpStatus = loadOutput(node, module);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    node = config_find(root, INPUTS_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading input %s", node->name);
+            status_t tmpStatus = loadInput(node, module);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    if (status == NO_ERROR) {
+        mHwModules.add(module);
+    } else {
+        delete module;
+    }
+}
+
+void AudioPolicyManager::loadHwModules(cnode *root)
+{
+    cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
+    if (node == NULL) {
+        return;
+    }
+
+    node = node->first_child;
+    while (node) {
+        ALOGV("loadHwModules() loading module %s", node->name);
+        loadHwModule(node);
+        node = node->next;
+    }
+}
+
+void AudioPolicyManager::loadGlobalConfig(cnode *root)
+{
+    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
+    if (node == NULL) {
+        return;
+    }
+    node = node->first_child;
+    while (node) {
+        if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
+            mAvailableOutputDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
+            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
+                  mAvailableOutputDevices.types());
+        } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
+            audio_devices_t device = (audio_devices_t)stringToEnum(sDeviceNameToEnumTable,
+                                              ARRAY_SIZE(sDeviceNameToEnumTable),
+                                              (char *)node->value);
+            if (device != AUDIO_DEVICE_NONE) {
+                mDefaultOutputDevice = new DeviceDescriptor(device);
+            } else {
+                ALOGW("loadGlobalConfig() default device not specified");
+            }
+            ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", mDefaultOutputDevice->mType);
+        } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
+            mAvailableInputDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
+            ALOGV("loadGlobalConfig() Available InputDevices %08x", mAvailableInputDevices.types());
+        } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
+            mSpeakerDrcEnabled = stringToBool((char *)node->value);
+            ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", mSpeakerDrcEnabled);
+        }
+        node = node->next;
+    }
+}
+
+status_t AudioPolicyManager::loadAudioPolicyConfig(const char *path)
+{
+    cnode *root;
+    char *data;
+
+    data = (char *)load_file(path, NULL);
+    if (data == NULL) {
+        return -ENODEV;
+    }
+    root = config_node("", "");
+    config_load(root, data);
+
+    loadGlobalConfig(root);
+    loadHwModules(root);
+
+    config_free(root);
+    free(root);
+    free(data);
+
+    ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
+
+    return NO_ERROR;
+}
+
+void AudioPolicyManager::defaultAudioPolicyConfig(void)
+{
+    HwModule *module;
+    IOProfile *profile;
+    sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
+    mAvailableOutputDevices.add(mDefaultOutputDevice);
+    mAvailableInputDevices.add(defaultInputDevice);
+
+    module = new HwModule("primary");
+
+    profile = new IOProfile(module);
+    profile->mSamplingRates.add(44100);
+    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
+    profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO);
+    profile->mSupportedDevices.add(mDefaultOutputDevice);
+    profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY;
+    module->mOutputProfiles.add(profile);
+
+    profile = new IOProfile(module);
+    profile->mSamplingRates.add(8000);
+    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
+    profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO);
+    profile->mSupportedDevices.add(defaultInputDevice);
+    module->mInputProfiles.add(profile);
+
+    mHwModules.add(module);
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
new file mode 100644
index 0000000..8a631ba
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <cutils/config_utils.h>
+#include <cutils/misc.h>
+#include <utils/Timers.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/SortedVector.h>
+#include "AudioPolicyInterface.h"
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
+#define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
+// Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB
+#define SONIFICATION_HEADSET_VOLUME_MIN  0.016
+// Time in milliseconds during which we consider that music is still active after a music
+// track was stopped - see computeVolume()
+#define SONIFICATION_HEADSET_MUSIC_DELAY  5000
+// Time in milliseconds after media stopped playing during which we consider that the
+// sonification should be as unobtrusive as during the time media was playing.
+#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
+// Time in milliseconds during which some streams are muted while the audio path
+// is switched
+#define MUTE_TIME_MS 2000
+
+#define NUM_TEST_OUTPUTS 5
+
+#define NUM_VOL_CURVE_KNEES 2
+
+// Default minimum length allowed for offloading a compressed track
+// Can be overridden by the audio.offload.min.duration.secs property
+#define OFFLOAD_DEFAULT_MIN_DURATION_SECS 60
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManager implements audio policy manager behavior common to all platforms.
+// ----------------------------------------------------------------------------
+
+class AudioPolicyManager: public AudioPolicyInterface
+#ifdef AUDIO_POLICY_TEST
+    , public Thread
+#endif //AUDIO_POLICY_TEST
+{
+
+public:
+                AudioPolicyManager(AudioPolicyClientInterface *clientInterface);
+        virtual ~AudioPolicyManager();
+
+        // AudioPolicyInterface
+        virtual status_t setDeviceConnectionState(audio_devices_t device,
+                                                          audio_policy_dev_state_t state,
+                                                          const char *device_address);
+        virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+                                                                              const char *device_address);
+        virtual void setPhoneState(audio_mode_t state);
+        virtual void setForceUse(audio_policy_force_use_t usage,
+                                 audio_policy_forced_cfg_t config);
+        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+        virtual void setSystemProperty(const char* property, const char* value);
+        virtual status_t initCheck();
+        virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
+                                            uint32_t samplingRate,
+                                            audio_format_t format,
+                                            audio_channel_mask_t channelMask,
+                                            audio_output_flags_t flags,
+                                            const audio_offload_info_t *offloadInfo);
+        virtual status_t startOutput(audio_io_handle_t output,
+                                     audio_stream_type_t stream,
+                                     int session = 0);
+        virtual status_t stopOutput(audio_io_handle_t output,
+                                    audio_stream_type_t stream,
+                                    int session = 0);
+        virtual void releaseOutput(audio_io_handle_t output);
+        virtual audio_io_handle_t getInput(audio_source_t inputSource,
+                                            uint32_t samplingRate,
+                                            audio_format_t format,
+                                            audio_channel_mask_t channelMask,
+                                            audio_in_acoustics_t acoustics);
+
+        // indicates to the audio policy manager that the input starts being used.
+        virtual status_t startInput(audio_io_handle_t input);
+
+        // indicates to the audio policy manager that the input stops being used.
+        virtual status_t stopInput(audio_io_handle_t input);
+        virtual void releaseInput(audio_io_handle_t input);
+        virtual void initStreamVolume(audio_stream_type_t stream,
+                                                    int indexMin,
+                                                    int indexMax);
+        virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                              int index,
+                                              audio_devices_t device);
+        virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                              int *index,
+                                              audio_devices_t device);
+
+        // return the strategy corresponding to a given stream type
+        virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
+
+        // return the enabled output devices for the given stream type
+        virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+
+        virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc = NULL);
+        virtual status_t registerEffect(const effect_descriptor_t *desc,
+                                        audio_io_handle_t io,
+                                        uint32_t strategy,
+                                        int session,
+                                        int id);
+        virtual status_t unregisterEffect(int id);
+        virtual status_t setEffectEnabled(int id, bool enabled);
+
+        virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+        // return whether a stream is playing remotely, override to change the definition of
+        //   local/remote playback, used for instance by notification manager to not make
+        //   media players lose audio focus when not playing locally
+        virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+        virtual bool isSourceActive(audio_source_t source) const;
+
+        virtual status_t dump(int fd);
+
+        virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
+
+protected:
+
+        enum routing_strategy {
+            STRATEGY_MEDIA,
+            STRATEGY_PHONE,
+            STRATEGY_SONIFICATION,
+            STRATEGY_SONIFICATION_RESPECTFUL,
+            STRATEGY_DTMF,
+            STRATEGY_ENFORCED_AUDIBLE,
+            NUM_STRATEGIES
+        };
+
+        // 4 points to define the volume attenuation curve, each characterized by the volume
+        // index (from 0 to 100) at which they apply, and the attenuation in dB at that index.
+        // we use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl()
+
+        enum { VOLMIN = 0, VOLKNEE1 = 1, VOLKNEE2 = 2, VOLMAX = 3, VOLCNT = 4};
+
+        class VolumeCurvePoint
+        {
+        public:
+            int mIndex;
+            float mDBAttenuation;
+        };
+
+        // device categories used for volume curve management.
+        enum device_category {
+            DEVICE_CATEGORY_HEADSET,
+            DEVICE_CATEGORY_SPEAKER,
+            DEVICE_CATEGORY_EARPIECE,
+            DEVICE_CATEGORY_CNT
+        };
+
+        class IOProfile;
+
+        class DeviceDescriptor: public RefBase
+        {
+        public:
+            DeviceDescriptor(audio_devices_t type, String8 address,
+                             audio_channel_mask_t channelMask) :
+                                 mType(type), mAddress(address),
+                                 mChannelMask(channelMask), mId(0) {}
+
+            DeviceDescriptor(audio_devices_t type) :
+                                 mType(type), mAddress(""),
+                                 mChannelMask(0), mId(0) {}
+
+            status_t dump(int fd, int spaces) const;
+            static void dumpHeader(int fd, int spaces);
+
+            bool equals(const sp<DeviceDescriptor>& other) const;
+
+            audio_devices_t mType;
+            String8 mAddress;
+            audio_channel_mask_t mChannelMask;
+            uint32_t mId;
+        };
+
+        class DeviceVector : public SortedVector< sp<DeviceDescriptor> >
+        {
+        public:
+            DeviceVector() : SortedVector(), mTypes(AUDIO_DEVICE_NONE) {}
+
+            ssize_t         add(const sp<DeviceDescriptor>& item);
+            ssize_t         remove(const sp<DeviceDescriptor>& item);
+            ssize_t         indexOf(const sp<DeviceDescriptor>& item) const;
+
+            audio_devices_t types() const { return mTypes; }
+
+            void loadDevicesFromType(audio_devices_t types);
+
+        private:
+            void refreshTypes();
+            audio_devices_t mTypes;
+        };
+
+        class HwModule {
+        public:
+                    HwModule(const char *name);
+                    ~HwModule();
+
+            void dump(int fd);
+
+            const char *const mName; // base name of the audio HW module (primary, a2dp ...)
+            audio_module_handle_t mHandle;
+            Vector <IOProfile *> mOutputProfiles; // output profiles exposed by this module
+            Vector <IOProfile *> mInputProfiles;  // input profiles exposed by this module
+        };
+
+        // the IOProfile class describes the capabilities of an output or input stream.
+        // It is currently assumed that all combination of listed parameters are supported.
+        // It is used by the policy manager to determine if an output or input is suitable for
+        // a given use case,  open/close it accordingly and connect/disconnect audio tracks
+        // to/from it.
+        class IOProfile
+        {
+        public:
+            IOProfile(HwModule *module);
+            ~IOProfile();
+
+            bool isCompatibleProfile(audio_devices_t device,
+                                     uint32_t samplingRate,
+                                     audio_format_t format,
+                                     audio_channel_mask_t channelMask,
+                                     audio_output_flags_t flags) const;
+
+            void dump(int fd);
+
+            // by convention, "0" in the first entry in mSamplingRates, mChannelMasks or mFormats
+            // indicates the supported parameters should be read from the output stream
+            // after it is opened for the first time
+            Vector <uint32_t> mSamplingRates; // supported sampling rates
+            Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks
+            Vector <audio_format_t> mFormats; // supported audio formats
+            DeviceVector  mSupportedDevices; // supported devices
+                                             // (devices this output can be routed to)
+            audio_output_flags_t mFlags; // attribute flags (e.g primary output,
+                                                // direct output...). For outputs only.
+            HwModule *mModule;                     // audio HW module exposing this I/O stream
+        };
+
+        // default volume curve
+        static const VolumeCurvePoint sDefaultVolumeCurve[AudioPolicyManager::VOLCNT];
+        // default volume curve for media strategy
+        static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT];
+        // volume curve for media strategy on speakers
+        static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT];
+        // volume curve for sonification strategy on speakers
+        static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sDefaultSystemVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
+        static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
+        // default volume curves per stream and device category. See initializeVolumeCurves()
+        static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
+
+        // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
+        // and keep track of the usage of this output by each audio stream type.
+        class AudioOutputDescriptor
+        {
+        public:
+            AudioOutputDescriptor(const IOProfile *profile);
+
+            status_t    dump(int fd);
+
+            audio_devices_t device() const;
+            void changeRefCount(audio_stream_type_t stream, int delta);
+
+            bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
+            audio_devices_t supportedDevices();
+            uint32_t latency();
+            bool sharesHwModuleWith(const AudioOutputDescriptor *outputDesc);
+            bool isActive(uint32_t inPastMs = 0) const;
+            bool isStreamActive(audio_stream_type_t stream,
+                                uint32_t inPastMs = 0,
+                                nsecs_t sysTime = 0) const;
+            bool isStrategyActive(routing_strategy strategy,
+                             uint32_t inPastMs = 0,
+                             nsecs_t sysTime = 0) const;
+
+            audio_io_handle_t mId;              // output handle
+            uint32_t mSamplingRate;             //
+            audio_format_t mFormat;             //
+            audio_channel_mask_t mChannelMask;     // output configuration
+            uint32_t mLatency;                  //
+            audio_output_flags_t mFlags;   //
+            audio_devices_t mDevice;                   // current device this output is routed to
+            uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
+            nsecs_t mStopTime[AUDIO_STREAM_CNT];
+            AudioOutputDescriptor *mOutput1;    // used by duplicated outputs: first output
+            AudioOutputDescriptor *mOutput2;    // used by duplicated outputs: second output
+            float mCurVolume[AUDIO_STREAM_CNT];   // current stream volume
+            int mMuteCount[AUDIO_STREAM_CNT];     // mute request counter
+            const IOProfile *mProfile;          // I/O profile this output derives from
+            bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible
+                                                // device selection. See checkDeviceMuteStrategies()
+            uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
+        };
+
+        // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
+        // and keep track of the usage of this input.
+        class AudioInputDescriptor
+        {
+        public:
+            AudioInputDescriptor(const IOProfile *profile);
+
+            status_t    dump(int fd);
+
+            uint32_t mSamplingRate;                     //
+            audio_format_t mFormat;                     // input configuration
+            audio_channel_mask_t mChannelMask;             //
+            audio_devices_t mDevice;                    // current device this input is routed to
+            uint32_t mRefCount;                         // number of AudioRecord clients using this input
+            audio_source_t mInputSource;                // input source selected by application (mediarecorder.h)
+            const IOProfile *mProfile;                  // I/O profile this input derives from
+        };
+
+        // stream descriptor used for volume control
+        class StreamDescriptor
+        {
+        public:
+            StreamDescriptor();
+
+            int getVolumeIndex(audio_devices_t device);
+            void dump(int fd);
+
+            int mIndexMin;      // min volume index
+            int mIndexMax;      // max volume index
+            KeyedVector<audio_devices_t, int> mIndexCur;   // current volume index per device
+            bool mCanBeMuted;   // true if the stream can be muted
+
+            const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT];
+        };
+
+        // stream descriptor used for volume control
+        class EffectDescriptor
+        {
+        public:
+
+            status_t dump(int fd);
+
+            int mIo;                // io the effect is attached to
+            routing_strategy mStrategy; // routing strategy the effect is associated to
+            int mSession;               // audio session the effect is on
+            effect_descriptor_t mDesc;  // effect descriptor
+            bool mEnabled;              // enabled state: CPU load being used or not
+        };
+
+        void addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc);
+
+        // return the strategy corresponding to a given stream type
+        static routing_strategy getStrategy(audio_stream_type_t stream);
+
+        // return appropriate device for streams handled by the specified strategy according to current
+        // phone state, connected devices...
+        // if fromCache is true, the device is returned from mDeviceForStrategy[],
+        // otherwise it is determine by current state
+        // (device connected,phone state, force use, a2dp output...)
+        // This allows to:
+        //  1 speed up process when the state is stable (when starting or stopping an output)
+        //  2 access to either current device selection (fromCache == true) or
+        // "future" device selection (fromCache == false) when called from a context
+        //  where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+        //  before updateDevicesAndOutputs() is called.
+        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
+                                                     bool fromCache);
+
+        // change the route of the specified output. Returns the number of ms we have slept to
+        // allow new routing to take effect in certain cases.
+        uint32_t setOutputDevice(audio_io_handle_t output,
+                             audio_devices_t device,
+                             bool force = false,
+                             int delayMs = 0);
+
+        // select input device corresponding to requested audio source
+        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource);
+
+        // return io handle of active input or 0 if no input is active
+        //    Only considers inputs from physical devices (e.g. main mic, headset mic) when
+        //    ignoreVirtualInputs is true.
+        audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+
+        // initialize volume curves for each strategy and device category
+        void initializeVolumeCurves();
+
+        // compute the actual volume for a given stream according to the requested index and a particular
+        // device
+        virtual float computeVolume(audio_stream_type_t stream, int index,
+                                    audio_io_handle_t output, audio_devices_t device);
+
+        // check that volume change is permitted, compute and send new volume to audio hardware
+        status_t checkAndSetVolume(audio_stream_type_t stream, int index, audio_io_handle_t output,
+                                   audio_devices_t device, int delayMs = 0, bool force = false);
+
+        // apply all stream volumes to the specified output and device
+        void applyStreamVolumes(audio_io_handle_t output, audio_devices_t device, int delayMs = 0, bool force = false);
+
+        // Mute or unmute all streams handled by the specified strategy on the specified output
+        void setStrategyMute(routing_strategy strategy,
+                             bool on,
+                             audio_io_handle_t output,
+                             int delayMs = 0,
+                             audio_devices_t device = (audio_devices_t)0);
+
+        // Mute or unmute the stream on the specified output
+        void setStreamMute(audio_stream_type_t stream,
+                           bool on,
+                           audio_io_handle_t output,
+                           int delayMs = 0,
+                           audio_devices_t device = (audio_devices_t)0);
+
+        // handle special cases for sonification strategy while in call: mute streams or replace by
+        // a special tone in the device used for communication
+        void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
+
+        // true if device is in a telephony or VoIP call
+        virtual bool isInCall();
+
+        // true if given state represents a device in a telephony or VoIP call
+        virtual bool isStateInCall(int state);
+
+        // when a device is connected, checks if an open output can be routed
+        // to this device. If none is open, tries to open one of the available outputs.
+        // Returns an output suitable to this device or 0.
+        // when a device is disconnected, checks if an output is not used any more and
+        // returns its handle if any.
+        // transfers the audio tracks and effects from one output thread to another accordingly.
+        status_t checkOutputsForDevice(audio_devices_t device,
+                                       audio_policy_dev_state_t state,
+                                       SortedVector<audio_io_handle_t>& outputs,
+                                       const String8 address);
+
+        // close an output and its companion duplicating output.
+        void closeOutput(audio_io_handle_t output);
+
+        // checks and if necessary changes outputs used for all strategies.
+        // must be called every time a condition that affects the output choice for a given strategy
+        // changes: connected device, phone state, force use...
+        // Must be called before updateDevicesAndOutputs()
+        void checkOutputForStrategy(routing_strategy strategy);
+
+        // Same as checkOutputForStrategy() but for all strategies in order of priority
+        void checkOutputForAllStrategies();
+
+        // manages A2DP output suspend/restore according to phone state and BT SCO usage
+        void checkA2dpSuspend();
+
+        // returns the A2DP output handle if it is open or 0 otherwise
+        audio_io_handle_t getA2dpOutput();
+
+        // selects the most appropriate device on output for current state
+        // must be called every time a condition that affects the device choice for a given output
+        // changes: connected device, phone state, force use, output start, output stop..
+        // see getDeviceForStrategy() for the use of fromCache parameter
+
+        audio_devices_t getNewDevice(audio_io_handle_t output, bool fromCache);
+        // updates cache of device used by all strategies (mDeviceForStrategy[])
+        // must be called every time a condition that affects the device choice for a given strategy
+        // changes: connected device, phone state, force use...
+        // cached values are used by getDeviceForStrategy() if parameter fromCache is true.
+        // Must be called after checkOutputForAllStrategies()
+
+        void updateDevicesAndOutputs();
+
+        virtual uint32_t getMaxEffectsCpuLoad();
+        virtual uint32_t getMaxEffectsMemory();
+#ifdef AUDIO_POLICY_TEST
+        virtual     bool        threadLoop();
+                    void        exit();
+        int testOutputIndex(audio_io_handle_t output);
+#endif //AUDIO_POLICY_TEST
+
+        status_t setEffectEnabled(EffectDescriptor *pDesc, bool enabled);
+
+        // returns the category the device belongs to with regard to volume curve management
+        static device_category getDeviceCategory(audio_devices_t device);
+
+        // extract one device relevant for volume control from multiple device selection
+        static audio_devices_t getDeviceForVolume(audio_devices_t device);
+
+        SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
+                        DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> openOutputs);
+        bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
+                                           SortedVector<audio_io_handle_t>& outputs2);
+
+        // mute/unmute strategies using an incompatible device combination
+        // if muting, wait for the audio in pcm buffer to be drained before proceeding
+        // if unmuting, unmute only after the specified delay
+        // Returns the number of ms waited
+        uint32_t  checkDeviceMuteStrategies(AudioOutputDescriptor *outputDesc,
+                                            audio_devices_t prevDevice,
+                                            uint32_t delayMs);
+
+        audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
+                                       audio_output_flags_t flags);
+        IOProfile *getInputProfile(audio_devices_t device,
+                                   uint32_t samplingRate,
+                                   audio_format_t format,
+                                   audio_channel_mask_t channelMask);
+        IOProfile *getProfileForDirectOutput(audio_devices_t device,
+                                                       uint32_t samplingRate,
+                                                       audio_format_t format,
+                                                       audio_channel_mask_t channelMask,
+                                                       audio_output_flags_t flags);
+
+        audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs);
+
+        bool isNonOffloadableEffectEnabled();
+
+        //
+        // Audio policy configuration file parsing (audio_policy.conf)
+        //
+        static uint32_t stringToEnum(const struct StringToEnum *table,
+                                     size_t size,
+                                     const char *name);
+        static const char *enumToString(const struct StringToEnum *table,
+                                      size_t size,
+                                      uint32_t value);
+        static bool stringToBool(const char *value);
+        static audio_output_flags_t parseFlagNames(char *name);
+        static audio_devices_t parseDeviceNames(char *name);
+        void loadSamplingRates(char *name, IOProfile *profile);
+        void loadFormats(char *name, IOProfile *profile);
+        void loadOutChannels(char *name, IOProfile *profile);
+        void loadInChannels(char *name, IOProfile *profile);
+        status_t loadOutput(cnode *root,  HwModule *module);
+        status_t loadInput(cnode *root,  HwModule *module);
+        void loadHwModule(cnode *root);
+        void loadHwModules(cnode *root);
+        void loadGlobalConfig(cnode *root);
+        status_t loadAudioPolicyConfig(const char *path);
+        void defaultAudioPolicyConfig(void);
+
+
+        AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
+        audio_io_handle_t mPrimaryOutput;              // primary output handle
+        // list of descriptors for outputs currently opened
+        DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mOutputs;
+        // copy of mOutputs before setDeviceConnectionState() opens new outputs
+        // reset to mOutputs when updateDevicesAndOutputs() is called.
+        DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mPreviousOutputs;
+        DefaultKeyedVector<audio_io_handle_t, AudioInputDescriptor *> mInputs;     // list of input descriptors
+        DeviceVector  mAvailableOutputDevices; // bit field of all available output devices
+        DeviceVector  mAvailableInputDevices; // bit field of all available input devices
+                                                // without AUDIO_DEVICE_BIT_IN to allow direct bit
+                                                // field comparisons
+        int mPhoneState;                                                    // current phone state
+        audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];   // current forced use configuration
+
+        StreamDescriptor mStreams[AUDIO_STREAM_CNT];           // stream descriptors for volume control
+        bool    mLimitRingtoneVolume;                                       // limit ringtone volume to music volume if headset connected
+        audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
+        float   mLastVoiceVolume;                                           // last voice volume value sent to audio HAL
+
+        // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
+        static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
+        // Maximum memory allocated to audio effects in KB
+        static const uint32_t MAX_EFFECTS_MEMORY = 512;
+        uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
+        uint32_t mTotalEffectsMemory;  // current memory used by effects
+        KeyedVector<int, EffectDescriptor *> mEffects;  // list of registered audio effects
+        bool    mA2dpSuspended;  // true if A2DP output is suspended
+        sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
+        bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path
+                                // to boost soft sounds, used to adjust volume curves accordingly
+
+        Vector <HwModule *> mHwModules;
+        volatile int32_t mNextUniqueId;
+
+#ifdef AUDIO_POLICY_TEST
+        Mutex   mLock;
+        Condition mWaitWorkCV;
+
+        int             mCurOutput;
+        bool            mDirectOutput;
+        audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS];
+        int             mTestInput;
+        uint32_t        mTestDevice;
+        uint32_t        mTestSamplingRate;
+        uint32_t        mTestFormat;
+        uint32_t        mTestChannels;
+        uint32_t        mTestLatencyMs;
+#endif //AUDIO_POLICY_TEST
+
+private:
+        static float volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
+                int indexInUi);
+        // updates device caching and output for streams that can influence the
+        //    routing of notifications
+        void handleNotificationRoutingForStream(audio_stream_type_t stream);
+        static bool isVirtualInputDevice(audio_devices_t device);
+        uint32_t nextUniqueId();
+        // converts device address to string sent to audio HAL via setParameters
+        static String8 addressToParameter(audio_devices_t device, const String8 address);
+};
+
+};
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
similarity index 63%
rename from services/audioflinger/AudioPolicyService.cpp
rename to services/audiopolicy/AudioPolicyService.cpp
index a37272d..4a708a0 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -60,7 +60,8 @@
 // ----------------------------------------------------------------------------
 
 AudioPolicyService::AudioPolicyService()
-    : BnAudioPolicyService() , mpAudioPolicyDev(NULL) , mpAudioPolicy(NULL)
+    : BnAudioPolicyService(), mpAudioPolicyDev(NULL), mpAudioPolicy(NULL),
+      mAudioPolicyManager(NULL), mAudioPolicyClient(NULL)
 {
     char value[PROPERTY_VALUE_MAX];
     const struct hw_module_t *module;
@@ -75,28 +76,40 @@
     mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
     // start output activity command thread
     mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
+
+#ifdef USE_LEGACY_AUDIO_POLICY
+    ALOGI("AudioPolicyService CSTOR in legacy mode");
+
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
-    if (rc)
+    if (rc) {
         return;
-
+    }
     rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
     ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
                                                &mpAudioPolicy);
     ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = mpAudioPolicy->init_check(mpAudioPolicy);
     ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
-
+    }
     ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
+#else
+    ALOGI("AudioPolicyService CSTOR in new mode");
+
+    mAudioPolicyClient = new AudioPolicyClient(this);
+    mAudioPolicyManager = new AudioPolicyManager(mAudioPolicyClient);
+#endif
 
     // load audio pre processing modules
     if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
@@ -126,450 +139,19 @@
     }
     mInputs.clear();
 
-    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL)
+#ifdef USE_LEGACY_AUDIO_POLICY
+    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
         mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
-    if (mpAudioPolicyDev != NULL)
+    }
+    if (mpAudioPolicyDev != NULL) {
         audio_policy_dev_close(mpAudioPolicyDev);
+    }
+#else
+    delete mAudioPolicyManager;
+    delete mAudioPolicyClient;
+#endif
 }
 
-status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
-                                                  audio_policy_dev_state_t state,
-                                                  const char *device_address)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
-        return BAD_VALUE;
-    }
-    if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
-            state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
-        return BAD_VALUE;
-    }
-
-    ALOGV("setDeviceConnectionState()");
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
-                                                      state, device_address);
-}
-
-audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
-                                                              audio_devices_t device,
-                                                              const char *device_address)
-{
-    if (mpAudioPolicy == NULL) {
-        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-    }
-    return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
-                                                      device_address);
-}
-
-status_t AudioPolicyService::setPhoneState(audio_mode_t state)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (uint32_t(state) >= AUDIO_MODE_CNT) {
-        return BAD_VALUE;
-    }
-
-    ALOGV("setPhoneState()");
-
-    // TODO: check if it is more appropriate to do it in platform specific policy manager
-    AudioSystem::setMode(state);
-
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
-                                         audio_policy_forced_cfg_t config)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
-        return BAD_VALUE;
-    }
-    if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
-        return BAD_VALUE;
-    }
-    ALOGV("setForceUse()");
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
-    return NO_ERROR;
-}
-
-audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
-{
-    if (mpAudioPolicy == NULL) {
-        return AUDIO_POLICY_FORCE_NONE;
-    }
-    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
-        return AUDIO_POLICY_FORCE_NONE;
-    }
-    return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
-}
-
-audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
-                                    uint32_t samplingRate,
-                                    audio_format_t format,
-                                    audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags,
-                                    const audio_offload_info_t *offloadInfo)
-{
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    ALOGV("getOutput()");
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
-                                    format, channelMask, flags, offloadInfo);
-}
-
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream,
-                                         int session)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    ALOGV("startOutput()");
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
-}
-
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
-                                        audio_stream_type_t stream,
-                                        int session)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    ALOGV("stopOutput()");
-    mOutputCommandThread->stopOutputCommand(output, stream, session);
-    return NO_ERROR;
-}
-
-status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
-                                      audio_stream_type_t stream,
-                                      int session)
-{
-    ALOGV("doStopOutput from tid %d", gettid());
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
-}
-
-void AudioPolicyService::releaseOutput(audio_io_handle_t output)
-{
-    if (mpAudioPolicy == NULL) {
-        return;
-    }
-    ALOGV("releaseOutput()");
-    mOutputCommandThread->releaseOutputCommand(output);
-}
-
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
-{
-    ALOGV("doReleaseOutput from tid %d", gettid());
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->release_output(mpAudioPolicy, output);
-}
-
-audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
-                                    uint32_t samplingRate,
-                                    audio_format_t format,
-                                    audio_channel_mask_t channelMask,
-                                    int audioSession)
-{
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    // already checked by client, but double-check in case the client wrapper is bypassed
-    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) {
-        return 0;
-    }
-
-    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
-        return 0;
-    }
-
-    Mutex::Autolock _l(mLock);
-    // the audio_in_acoustics_t parameter is ignored by get_input()
-    audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
-                                                   format, channelMask, (audio_in_acoustics_t) 0);
-
-    if (input == 0) {
-        return input;
-    }
-    // create audio pre processors according to input source
-    audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ?
-                                    AUDIO_SOURCE_VOICE_RECOGNITION : inputSource;
-
-    ssize_t index = mInputSources.indexOfKey(aliasSource);
-    if (index < 0) {
-        return input;
-    }
-    ssize_t idx = mInputs.indexOfKey(input);
-    InputDesc *inputDesc;
-    if (idx < 0) {
-        inputDesc = new InputDesc(audioSession);
-        mInputs.add(input, inputDesc);
-    } else {
-        inputDesc = mInputs.valueAt(idx);
-    }
-
-    Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
-    for (size_t i = 0; i < effects.size(); i++) {
-        EffectDesc *effect = effects[i];
-        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
-        status_t status = fx->initCheck();
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to create Fx %s on input %d", effect->mName, input);
-            // fx goes out of scope and strong ref on AudioEffect is released
-            continue;
-        }
-        for (size_t j = 0; j < effect->mParams.size(); j++) {
-            fx->setParameter(effect->mParams[j]);
-        }
-        inputDesc->mEffects.add(fx);
-    }
-    setPreProcessorEnabled(inputDesc, true);
-    return input;
-}
-
-status_t AudioPolicyService::startInput(audio_io_handle_t input)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    Mutex::Autolock _l(mLock);
-
-    return mpAudioPolicy->start_input(mpAudioPolicy, input);
-}
-
-status_t AudioPolicyService::stopInput(audio_io_handle_t input)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    Mutex::Autolock _l(mLock);
-
-    return mpAudioPolicy->stop_input(mpAudioPolicy, input);
-}
-
-void AudioPolicyService::releaseInput(audio_io_handle_t input)
-{
-    if (mpAudioPolicy == NULL) {
-        return;
-    }
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->release_input(mpAudioPolicy, input);
-
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        return;
-    }
-    InputDesc *inputDesc = mInputs.valueAt(index);
-    setPreProcessorEnabled(inputDesc, false);
-    delete inputDesc;
-    mInputs.removeItemsAt(index);
-}
-
-status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
-                                            int indexMin,
-                                            int indexMax)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
-    Mutex::Autolock _l(mLock);
-    mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
-                                                  int index,
-                                                  audio_devices_t device)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!settingsAllowed()) {
-        return PERMISSION_DENIED;
-    }
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
-    Mutex::Autolock _l(mLock);
-    if (mpAudioPolicy->set_stream_volume_index_for_device) {
-        return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
-                                                                stream,
-                                                                index,
-                                                                device);
-    } else {
-        return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
-    }
-}
-
-status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
-                                                  int *index,
-                                                  audio_devices_t device)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
-    Mutex::Autolock _l(mLock);
-    if (mpAudioPolicy->get_stream_volume_index_for_device) {
-        return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
-                                                                stream,
-                                                                index,
-                                                                device);
-    } else {
-        return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
-    }
-}
-
-uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
-{
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
-}
-
-//audio policy: use audio_device_t appropriately
-
-audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
-{
-    if (mpAudioPolicy == NULL) {
-        return (audio_devices_t)0;
-    }
-    return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
-}
-
-audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
-}
-
-status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
-                                audio_io_handle_t io,
-                                uint32_t strategy,
-                                int session,
-                                int id)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
-}
-
-status_t AudioPolicyService::unregisterEffect(int id)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
-}
-
-status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
-}
-
-bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
-{
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
-{
-    if (mpAudioPolicy == NULL) {
-        return 0;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isSourceActive(audio_source_t source) const
-{
-    if (mpAudioPolicy == NULL) {
-        return false;
-    }
-    if (mpAudioPolicy->is_source_active == 0) {
-        return false;
-    }
-    Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
-}
-
-status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
-                                                       effect_descriptor_t *descriptors,
-                                                       uint32_t *count)
-{
-
-    if (mpAudioPolicy == NULL) {
-        *count = 0;
-        return NO_INIT;
-    }
-    Mutex::Autolock _l(mLock);
-    status_t status = NO_ERROR;
-
-    size_t index;
-    for (index = 0; index < mInputs.size(); index++) {
-        if (mInputs.valueAt(index)->mSessionId == audioSession) {
-            break;
-        }
-    }
-    if (index == mInputs.size()) {
-        *count = 0;
-        return BAD_VALUE;
-    }
-    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
-
-    for (size_t i = 0; i < effects.size(); i++) {
-        effect_descriptor_t desc = effects[i]->descriptor();
-        if (i < *count) {
-            descriptors[i] = desc;
-        }
-    }
-    if (effects.size() > *count) {
-        status = NO_MEMORY;
-    }
-    *count = effects.size();
-    return status;
-}
 
 void AudioPolicyService::binderDied(const wp<IBinder>& who) {
     ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(),
@@ -595,7 +177,11 @@
     char buffer[SIZE];
     String8 result;
 
+#ifdef USE_LEGACY_AUDIO_POLICY
     snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy);
+#else
+    snprintf(buffer, SIZE, "AudioPolicyManager: %p\n", mAudioPolicyManager);
+#endif
     result.append(buffer);
     snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
     result.append(buffer);
@@ -606,7 +192,7 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyService::dump(int fd, const Vector<String16>& args)
+status_t AudioPolicyService::dump(int fd, const Vector<String16>& args __unused)
 {
     if (!dumpAllowed()) {
         dumpPermissionDenial(fd);
@@ -625,9 +211,15 @@
             mTonePlaybackThread->dump(fd);
         }
 
+#ifdef USE_LEGACY_AUDIO_POLICY
         if (mpAudioPolicy) {
             mpAudioPolicy->dump(mpAudioPolicy, fd);
         }
+#else
+        if (mAudioPolicyManager) {
+            mAudioPolicyManager->dump(fd);
+        }
+#endif
 
         if (locked) mLock.unlock();
     }
@@ -1114,11 +706,13 @@
 int AudioPolicyService::startTone(audio_policy_tone_t tone,
                                   audio_stream_type_t stream)
 {
-    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION)
+    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
         ALOGE("startTone: illegal tone requested (%d)", tone);
-    if (stream != AUDIO_STREAM_VOICE_CALL)
+    }
+    if (stream != AUDIO_STREAM_VOICE_CALL) {
         ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
             tone);
+    }
     mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
                                           AUDIO_STREAM_VOICE_CALL);
     return 0;
@@ -1135,21 +729,6 @@
     return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
 }
 
-bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
-{
-    if (mpAudioPolicy == NULL) {
-        ALOGV("mpAudioPolicy == NULL");
-        return false;
-    }
-
-    if (mpAudioPolicy->is_offload_supported == NULL) {
-        ALOGV("HAL does not implement is_offload_supported");
-        return false;
-    }
-
-    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
-}
-
 // ----------------------------------------------------------------------------
 // Audio pre-processing configuration
 // ----------------------------------------------------------------------------
@@ -1448,42 +1027,18 @@
     return NO_ERROR;
 }
 
-/* implementation of the interface to the policy manager */
 extern "C" {
-
-
-static audio_module_handle_t aps_load_hw_module(void *service,
-                                             const char *name)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->loadHwModule(name);
-}
-
-// deprecated: replaced by aps_open_output_on_module()
-static audio_io_handle_t aps_open_output(void *service,
+audio_module_handle_t aps_load_hw_module(void *service __unused,
+                                             const char *name);
+audio_io_handle_t aps_open_output(void *service __unused,
                                          audio_devices_t *pDevices,
                                          uint32_t *pSamplingRate,
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
+                                         audio_output_flags_t flags);
 
-    return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags);
-}
-
-static audio_io_handle_t aps_open_output_on_module(void *service,
+audio_io_handle_t aps_open_output_on_module(void *service __unused,
                                                    audio_module_handle_t module,
                                                    audio_devices_t *pDevices,
                                                    uint32_t *pSamplingRate,
@@ -1491,192 +1046,63 @@
                                                    audio_channel_mask_t *pChannelMask,
                                                    uint32_t *pLatencyMs,
                                                    audio_output_flags_t flags,
-                                                   const audio_offload_info_t *offloadInfo)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-    return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags, offloadInfo);
-}
-
-static audio_io_handle_t aps_open_dup_output(void *service,
+                                                   const audio_offload_info_t *offloadInfo);
+audio_io_handle_t aps_open_dup_output(void *service __unused,
                                                  audio_io_handle_t output1,
-                                                 audio_io_handle_t output2)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-    return af->openDuplicateOutput(output1, output2);
-}
-
-static int aps_close_output(void *service, audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
-        return PERMISSION_DENIED;
-
-    return af->closeOutput(output);
-}
-
-static int aps_suspend_output(void *service, audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return PERMISSION_DENIED;
-    }
-
-    return af->suspendOutput(output);
-}
-
-static int aps_restore_output(void *service, audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return PERMISSION_DENIED;
-    }
-
-    return af->restoreOutput(output);
-}
-
-// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
-static audio_io_handle_t aps_open_input(void *service,
+                                                 audio_io_handle_t output2);
+int aps_close_output(void *service __unused, audio_io_handle_t output);
+int aps_suspend_output(void *service __unused, audio_io_handle_t output);
+int aps_restore_output(void *service __unused, audio_io_handle_t output);
+audio_io_handle_t aps_open_input(void *service __unused,
                                         audio_devices_t *pDevices,
                                         uint32_t *pSamplingRate,
                                         audio_format_t *pFormat,
                                         audio_channel_mask_t *pChannelMask,
-                                        audio_in_acoustics_t acoustics)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-static audio_io_handle_t aps_open_input_on_module(void *service,
+                                        audio_in_acoustics_t acoustics __unused);
+audio_io_handle_t aps_open_input_on_module(void *service __unused,
                                                   audio_module_handle_t module,
                                                   audio_devices_t *pDevices,
                                                   uint32_t *pSamplingRate,
                                                   audio_format_t *pFormat,
-                                                  audio_channel_mask_t *pChannelMask)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-static int aps_close_input(void *service, audio_io_handle_t input)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
-        return PERMISSION_DENIED;
-
-    return af->closeInput(input);
-}
-
-static int aps_set_stream_output(void *service, audio_stream_type_t stream,
-                                     audio_io_handle_t output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
-        return PERMISSION_DENIED;
-
-    return af->setStreamOutput(stream, output);
-}
-
-static int aps_move_effects(void *service, int session,
+                                                  audio_channel_mask_t *pChannelMask);
+int aps_close_input(void *service __unused, audio_io_handle_t input);
+int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream);
+int aps_move_effects(void *service __unused, int session,
                                 audio_io_handle_t src_output,
-                                audio_io_handle_t dst_output)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
-        return PERMISSION_DENIED;
-
-    return af->moveEffects(session, src_output, dst_output);
-}
-
-static char * aps_get_parameters(void *service, audio_io_handle_t io_handle,
-                                     const char *keys)
-{
-    String8 result = AudioSystem::getParameters(io_handle, String8(keys));
-    return strdup(result.string());
-}
-
-static void aps_set_parameters(void *service, audio_io_handle_t io_handle,
-                                   const char *kv_pairs, int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
-}
-
-static int aps_set_stream_volume(void *service, audio_stream_type_t stream,
+                                audio_io_handle_t dst_output);
+char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
+                                     const char *keys);
+void aps_set_parameters(void *service, audio_io_handle_t io_handle,
+                                   const char *kv_pairs, int delay_ms);
+int aps_set_stream_volume(void *service, audio_stream_type_t stream,
                                      float volume, audio_io_handle_t output,
-                                     int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->setStreamVolume(stream, volume, output,
-                                               delay_ms);
-}
-
-static int aps_start_tone(void *service, audio_policy_tone_t tone,
-                              audio_stream_type_t stream)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->startTone(tone, stream);
-}
-
-static int aps_stop_tone(void *service)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->stopTone();
-}
-
-static int aps_set_voice_volume(void *service, float volume, int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->setVoiceVolume(volume, delay_ms);
-}
-
-}; // extern "C"
+                                     int delay_ms);
+int aps_start_tone(void *service, audio_policy_tone_t tone,
+                              audio_stream_type_t stream);
+int aps_stop_tone(void *service);
+int aps_set_voice_volume(void *service, float volume, int delay_ms);
+};
 
 namespace {
     struct audio_policy_service_ops aps_ops = {
-        open_output           : aps_open_output,
-        open_duplicate_output : aps_open_dup_output,
-        close_output          : aps_close_output,
-        suspend_output        : aps_suspend_output,
-        restore_output        : aps_restore_output,
-        open_input            : aps_open_input,
-        close_input           : aps_close_input,
-        set_stream_volume     : aps_set_stream_volume,
-        set_stream_output     : aps_set_stream_output,
-        set_parameters        : aps_set_parameters,
-        get_parameters        : aps_get_parameters,
-        start_tone            : aps_start_tone,
-        stop_tone             : aps_stop_tone,
-        set_voice_volume      : aps_set_voice_volume,
-        move_effects          : aps_move_effects,
-        load_hw_module        : aps_load_hw_module,
-        open_output_on_module : aps_open_output_on_module,
-        open_input_on_module  : aps_open_input_on_module,
+        .open_output           = aps_open_output,
+        .open_duplicate_output = aps_open_dup_output,
+        .close_output          = aps_close_output,
+        .suspend_output        = aps_suspend_output,
+        .restore_output        = aps_restore_output,
+        .open_input            = aps_open_input,
+        .close_input           = aps_close_input,
+        .set_stream_volume     = aps_set_stream_volume,
+        .invalidate_stream     = aps_invalidate_stream,
+        .set_parameters        = aps_set_parameters,
+        .get_parameters        = aps_get_parameters,
+        .start_tone            = aps_start_tone,
+        .stop_tone             = aps_stop_tone,
+        .set_voice_volume      = aps_set_voice_volume,
+        .move_effects          = aps_move_effects,
+        .load_hw_module        = aps_load_hw_module,
+        .open_output_on_module = aps_open_output_on_module,
+        .open_input_on_module  = aps_open_input_on_module,
     };
 }; // namespace <unnamed>
 
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
similarity index 75%
rename from services/audioflinger/AudioPolicyService.h
rename to services/audiopolicy/AudioPolicyService.h
index ae053a9..cdc90d0 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -30,6 +30,8 @@
 #include <media/IAudioPolicyService.h>
 #include <media/ToneGenerator.h>
 #include <media/AudioEffect.h>
+#include <hardware_legacy/AudioPolicyInterface.h>
+#include "AudioPolicyManager.h"
 
 namespace android {
 
@@ -38,7 +40,6 @@
 class AudioPolicyService :
     public BinderService<AudioPolicyService>,
     public BnAudioPolicyService,
-//    public AudioPolicyClientInterface,
     public IBinder::DeathRecipient
 {
     friend class BinderService<AudioPolicyService>;
@@ -313,6 +314,91 @@
         Vector< sp<AudioEffect> >mEffects;
     };
 
+    class AudioPolicyClient : public AudioPolicyClientInterface
+    {
+     public:
+        AudioPolicyClient(AudioPolicyService *service) : mAudioPolicyService(service) {}
+        virtual ~AudioPolicyClient() {}
+
+        //
+        // Audio HW module functions
+        //
+
+        // loads a HW module.
+        virtual audio_module_handle_t loadHwModule(const char *name);
+
+        //
+        // Audio output Control functions
+        //
+
+        // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
+        // in case the audio policy manager has no specific requirements for the output being opened.
+        // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
+        // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+        virtual audio_io_handle_t openOutput(audio_module_handle_t module,
+                                             audio_devices_t *pDevices,
+                                             uint32_t *pSamplingRate,
+                                             audio_format_t *pFormat,
+                                             audio_channel_mask_t *pChannelMask,
+                                             uint32_t *pLatencyMs,
+                                             audio_output_flags_t flags,
+                                             const audio_offload_info_t *offloadInfo = NULL);
+        // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
+        // a special mixer thread in the AudioFlinger.
+        virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2);
+        // closes the output stream
+        virtual status_t closeOutput(audio_io_handle_t output);
+        // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
+        // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+        virtual status_t suspendOutput(audio_io_handle_t output);
+        // restores a suspended output.
+        virtual status_t restoreOutput(audio_io_handle_t output);
+
+        //
+        // Audio input Control functions
+        //
+
+        // opens an audio input
+        virtual audio_io_handle_t openInput(audio_module_handle_t module,
+                                            audio_devices_t *pDevices,
+                                            uint32_t *pSamplingRate,
+                                            audio_format_t *pFormat,
+                                            audio_channel_mask_t *pChannelMask);
+        // closes an audio input
+        virtual status_t closeInput(audio_io_handle_t input);
+        //
+        // misc control functions
+        //
+
+        // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+        // for each output (destination device) it is attached to.
+        virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0);
+
+        // invalidate a stream type, causing a reroute to an unspecified new output
+        virtual status_t invalidateStream(audio_stream_type_t stream);
+
+        // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface.
+        virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0);
+        // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
+        virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+
+        // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
+        // over a telephony device during a phone call.
+        virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
+        virtual status_t stopTone();
+
+        // set down link audio volume.
+        virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+
+        // move effect to the specified output
+        virtual status_t moveEffects(int session,
+                                         audio_io_handle_t srcOutput,
+                                         audio_io_handle_t dstOutput);
+
+     private:
+        AudioPolicyService *mAudioPolicyService;
+    };
+
     static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1];
 
     void setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled);
@@ -344,6 +430,9 @@
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
+    AudioPolicyManager *mAudioPolicyManager;
+    AudioPolicyClient *mAudioPolicyClient;
+
     KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
     KeyedVector< audio_io_handle_t, InputDesc* > mInputs;
 };
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 51ba698..2f485b9 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -1,3 +1,17 @@
+# Copyright 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 LOCAL_PATH:= $(call my-dir)
 
 #
@@ -53,6 +67,7 @@
 
 LOCAL_C_INCLUDES += \
     system/media/camera/include \
+    system/media/private/camera/include \
     external/jpeg
 
 
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
index 7fdf304..bfef50e 100644
--- a/services/camera/libcameraservice/CameraDeviceFactory.cpp
+++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp
@@ -46,6 +46,8 @@
             device = new Camera2Device(cameraId);
             break;
         case CAMERA_DEVICE_API_VERSION_3_0:
+        case CAMERA_DEVICE_API_VERSION_3_1:
+        case CAMERA_DEVICE_API_VERSION_3_2:
             device = new Camera3Device(cameraId);
             break;
         default:
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 9ce7daf..02bca1f 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1,24 +1,24 @@
 /*
-**
-** Copyright (C) 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 #define LOG_TAG "CameraService"
 //#define LOG_NDEBUG 0
 
 #include <stdio.h>
+#include <string.h>
 #include <sys/types.h>
 #include <pthread.h>
 
@@ -32,10 +32,13 @@
 #include <gui/Surface.h>
 #include <hardware/hardware.h>
 #include <media/AudioSystem.h>
+#include <media/IMediaHTTPService.h>
 #include <media/mediaplayer.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/String16.h>
+#include <utils/Trace.h>
+#include <system/camera_vendor_tags.h>
 
 #include "CameraService.h"
 #include "api1/CameraClient.h"
@@ -130,6 +133,12 @@
             mModule->set_callbacks(this);
         }
 
+        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+
+        if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_2) {
+            setUpVendorTags();
+        }
+
         CameraDeviceFactory::registerService(this);
     }
 }
@@ -141,6 +150,7 @@
         }
     }
 
+    VendorTagDescriptor::clearGlobalVendorTagDescriptor();
     gCameraService = NULL;
 }
 
@@ -269,6 +279,22 @@
     return ret;
 }
 
+status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
+    if (!mModule) {
+        ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
+        return -ENODEV;
+    }
+
+    if (mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_2) {
+        // TODO: Remove this check once HAL1 shim is in place.
+        ALOGW("%s: Only HAL module version V2.2 or higher supports vendor tags", __FUNCTION__);
+        return -EOPNOTSUPP;
+    }
+
+    desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+    return OK;
+}
+
 int CameraService::getDeviceVersion(int cameraId, int* facing) {
     struct camera_info info;
     if (mModule->get_camera_info(cameraId, &info) != OK) {
@@ -298,6 +324,8 @@
       case CAMERA_DEVICE_API_VERSION_2_0:
       case CAMERA_DEVICE_API_VERSION_2_1:
       case CAMERA_DEVICE_API_VERSION_3_0:
+      case CAMERA_DEVICE_API_VERSION_3_1:
+      case CAMERA_DEVICE_API_VERSION_3_2:
         return true;
       default:
         return false;
@@ -306,6 +334,44 @@
     return false;
 }
 
+bool CameraService::setUpVendorTags() {
+    vendor_tag_ops_t vOps = vendor_tag_ops_t();
+
+    // Check if vendor operations have been implemented
+    if (mModule->get_vendor_tag_ops == NULL) {
+        ALOGI("%s: No vendor tags defined for this device.", __FUNCTION__);
+        return false;
+    }
+
+    ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
+    mModule->get_vendor_tag_ops(&vOps);
+    ATRACE_END();
+
+    // Ensure all vendor operations are present
+    if (vOps.get_tag_count == NULL || vOps.get_all_tags == NULL ||
+            vOps.get_section_name == NULL || vOps.get_tag_name == NULL ||
+            vOps.get_tag_type == NULL) {
+        ALOGE("%s: Vendor tag operations not fully defined. Ignoring definitions."
+               , __FUNCTION__);
+        return false;
+    }
+
+    // Read all vendor tag definitions into a descriptor
+    sp<VendorTagDescriptor> desc;
+    status_t res;
+    if ((res = VendorTagDescriptor::createDescriptorFromOps(&vOps, /*out*/desc))
+            != OK) {
+        ALOGE("%s: Could not generate descriptor from vendor tag operations,"
+              "received error %s (%d). Camera clients will not be able to use"
+              "vendor tags", __FUNCTION__, strerror(res), res);
+        return false;
+    }
+
+    // Set the global descriptor to use with camera metadata
+    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+    return true;
+}
+
 status_t CameraService::validateConnect(int cameraId,
                                     /*inout*/
                                     int& clientUid) const {
@@ -455,6 +521,8 @@
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
             client = new Camera2Client(this, cameraClient,
                     clientPackageName, cameraId,
                     facing, callingPid, clientUid, getpid(),
@@ -541,6 +609,8 @@
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
             client = new ProCamera2Client(this, cameraCb, String16(),
                     cameraId, facing, callingPid, USE_CALLING_UID, getpid());
             break;
@@ -619,6 +689,8 @@
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
             client = new CameraDeviceClient(this, cameraCb, String16(),
                     cameraId, facing, callingPid, USE_CALLING_UID, getpid());
             break;
@@ -876,7 +948,7 @@
 
 MediaPlayer* CameraService::newMediaPlayer(const char *file) {
     MediaPlayer* mp = new MediaPlayer();
-    if (mp->setDataSource(file, NULL) == NO_ERROR) {
+    if (mp->setDataSource(NULL /* httpService */, file, NULL) == NO_ERROR) {
         mp->setAudioStreamType(AUDIO_STREAM_ENFORCED_AUDIBLE);
         mp->prepare();
     } else {
@@ -1044,7 +1116,8 @@
         // Reset the client PID to allow server-initiated disconnect,
         // and to prevent further calls by client.
         mClientPid = getCallingPid();
-        notifyError();
+        CaptureResultExtras resultExtras; // a dummy result (invalid)
+        notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras);
         disconnect();
     }
 }
@@ -1073,7 +1146,8 @@
     return client;
 }
 
-void CameraService::Client::notifyError() {
+void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        const CaptureResultExtras& resultExtras) {
     mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
 }
 
@@ -1127,7 +1201,8 @@
 CameraService::ProClient::~ProClient() {
 }
 
-void CameraService::ProClient::notifyError() {
+void CameraService::ProClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        const CaptureResultExtras& resultExtras) {
     mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
 }
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index ad6a582..76ea7be 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -1,19 +1,18 @@
 /*
-**
-** Copyright (C) 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 #ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 #define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
@@ -31,6 +30,8 @@
 #include <camera/IProCameraCallbacks.h>
 #include <camera/camera2/ICameraDeviceUser.h>
 #include <camera/camera2/ICameraDeviceCallbacks.h>
+#include <camera/VendorTagDescriptor.h>
+#include <camera/CaptureResult.h>
 
 #include <camera/ICameraServiceListener.h>
 
@@ -73,6 +74,7 @@
                                       struct CameraInfo* cameraInfo);
     virtual status_t    getCameraCharacteristics(int cameraId,
                                                  CameraMetadata* cameraInfo);
+    virtual status_t    getCameraVendorTagDescriptor(/*out*/ sp<VendorTagDescriptor>& desc);
 
     virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
             const String16& clientPackageName, int clientUid,
@@ -181,7 +183,9 @@
         status_t                        finishCameraOps();
 
         // Notify client about a fatal error
-        virtual void                    notifyError() = 0;
+        virtual void                    notifyError(
+                ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                const CaptureResultExtras& resultExtras) = 0;
     private:
         AppOpsManager                   mAppOpsManager;
 
@@ -258,7 +262,8 @@
         // convert client from cookie. Client lock should be acquired before getting Client.
         static Client*       getClientFromCookie(void* user);
 
-        virtual void         notifyError();
+        virtual void         notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                                         const CaptureResultExtras& resultExtras);
 
         // Initialized in constructor
 
@@ -306,7 +311,8 @@
         virtual void          onExclusiveLockStolen() = 0;
 
     protected:
-        virtual void          notifyError();
+        virtual void          notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                                          const CaptureResultExtras& resultExtras);
 
         sp<IProCameraCallbacks> mRemoteCallback;
     }; // class ProClient
@@ -387,6 +393,8 @@
     // Helpers
 
     bool                isValidCameraId(int cameraId);
+
+    bool                setUpVendorTags();
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index af23557..0447979 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -118,7 +118,9 @@
             mZslProcessorThread = zslProc;
             break;
         }
-        case CAMERA_DEVICE_API_VERSION_3_0:{
+        case CAMERA_DEVICE_API_VERSION_3_0:
+        case CAMERA_DEVICE_API_VERSION_3_1:
+        case CAMERA_DEVICE_API_VERSION_3_2: {
             sp<ZslProcessor3> zslProc =
                     new ZslProcessor3(this, mCaptureSequencer);
             mZslProcessor = zslProc;
@@ -238,7 +240,7 @@
 
     result.append("    Scene mode: ");
     switch (p.sceneMode) {
-        case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED:
+        case ANDROID_CONTROL_SCENE_MODE_DISABLED:
             result.append("AUTO\n"); break;
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT)
@@ -816,6 +818,8 @@
             return res;
         }
         outputStreams.push(getZslStreamId());
+    } else {
+        mZslProcessor->deleteStream();
     }
 
     outputStreams.push(getPreviewStreamId());
@@ -1162,7 +1166,7 @@
          * Handle quirk mode for AF in scene modes
          */
         if (l.mParameters.quirks.triggerAfWithAuto &&
-                l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED &&
+                l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED &&
                 l.mParameters.focusMode != Parameters::FOCUS_MODE_AUTO &&
                 !l.mParameters.focusingAreas[0].isEmpty()) {
             ALOGV("%s: Quirk: Switching from focusMode %d to AUTO",
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index d2ac79c..c266213 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -110,11 +110,13 @@
     if (!mCallbackToApp && mCallbackConsumer == 0) {
         // Create CPU buffer queue endpoint, since app hasn't given us one
         // Make it async to avoid disconnect deadlocks
-        sp<BufferQueue> bq = new BufferQueue();
-        mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
+        sp<IGraphicBufferProducer> producer;
+        sp<IGraphicBufferConsumer> consumer;
+        BufferQueue::createBufferQueue(&producer, &consumer);
+        mCallbackConsumer = new CpuConsumer(consumer, kCallbackHeapCount);
         mCallbackConsumer->setFrameAvailableListener(this);
         mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
-        mCallbackWindow = new Surface(bq);
+        mCallbackWindow = new Surface(producer);
     }
 
     if (mCallbackStreamId != NO_STREAM) {
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index f5c28ed..8268f65 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -106,13 +106,12 @@
     }
 }
 
-void CaptureSequencer::onFrameAvailable(int32_t requestId,
-        const CameraMetadata &frame) {
-    ALOGV("%s: Listener found new frame", __FUNCTION__);
+void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
     ATRACE_CALL();
+    ALOGV("%s: New result available.", __FUNCTION__);
     Mutex::Autolock l(mInputMutex);
-    mNewFrameId = requestId;
-    mNewFrame = frame;
+    mNewFrameId = result.mResultExtras.requestId;
+    mNewFrame = result.mMetadata;
     if (!mNewFrameReceived) {
         mNewFrameReceived = true;
         mNewFrameSignal.signal();
@@ -585,12 +584,15 @@
         entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP);
         if (entry.count == 0) {
             ALOGE("No timestamp field in capture frame!");
-        }
-        if (entry.data.i64[0] != mCaptureTimestamp) {
-            ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 ","
-                    " captured buffer %" PRId64,
-                    entry.data.i64[0],
-                    mCaptureTimestamp);
+        } else if (entry.count == 1) {
+            if (entry.data.i64[0] != mCaptureTimestamp) {
+                ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 ","
+                        " captured buffer %" PRId64,
+                        entry.data.i64[0],
+                        mCaptureTimestamp);
+            }
+        } else {
+            ALOGE("Timestamp metadata is malformed!");
         }
         client->removeFrameListener(mCaptureId, mCaptureId + 1, this);
 
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index 9fb4ee7..d42ab13 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -24,6 +24,7 @@
 #include <utils/Mutex.h>
 #include <utils/Condition.h>
 #include "camera/CameraMetadata.h"
+#include "camera/CaptureResult.h"
 #include "Parameters.h"
 #include "FrameProcessor.h"
 
@@ -61,8 +62,8 @@
     // Notifications about AE state changes
     void notifyAutoExposure(uint8_t newState, int triggerId);
 
-    // Notifications from the frame processor
-    virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+    // Notification from the frame processor
+    virtual void onResultAvailable(const CaptureResult &result);
 
     // Notifications from the JPEG processor
     void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index dd5b27c..69bea24 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -55,7 +55,7 @@
 FrameProcessor::~FrameProcessor() {
 }
 
-bool FrameProcessor::processSingleFrame(CameraMetadata &frame,
+bool FrameProcessor::processSingleFrame(CaptureResult &frame,
                                         const sp<CameraDeviceBase> &device) {
 
     sp<Camera2Client> client = mClient.promote();
@@ -66,19 +66,19 @@
     bool partialResult = false;
     if (mUsePartialQuirk) {
         camera_metadata_entry_t entry;
-        entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+        entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
         if (entry.count > 0 &&
                 entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
             partialResult = true;
         }
     }
 
-    if (!partialResult && processFaceDetect(frame, client) != OK) {
+    if (!partialResult && processFaceDetect(frame.mMetadata, client) != OK) {
         return false;
     }
 
     if (mSynthesize3ANotify) {
-        process3aState(frame, client);
+        process3aState(frame.mMetadata, client);
     }
 
     return FrameProcessorBase::processSingleFrame(frame, device);
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 856ad32..514bd1a 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -51,7 +51,7 @@
 
     void processNewFrames(const sp<Camera2Client> &client);
 
-    virtual bool processSingleFrame(CameraMetadata &frame,
+    virtual bool processSingleFrame(CaptureResult &frame,
                                     const sp<CameraDeviceBase> &device);
 
     status_t processFaceDetect(const CameraMetadata &frame,
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index 2de7a2b..964d278 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -83,11 +83,13 @@
 
     if (mCaptureConsumer == 0) {
         // Create CPU buffer queue endpoint
-        sp<BufferQueue> bq = new BufferQueue();
-        mCaptureConsumer = new CpuConsumer(bq, 1);
+        sp<IGraphicBufferProducer> producer;
+        sp<IGraphicBufferConsumer> consumer;
+        BufferQueue::createBufferQueue(&producer, &consumer);
+        mCaptureConsumer = new CpuConsumer(consumer, 1);
         mCaptureConsumer->setFrameAvailableListener(this);
         mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
-        mCaptureWindow = new Surface(bq);
+        mCaptureWindow = new Surface(producer);
         // Create memory for API consumption
         mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
                                        "Camera2Client::CaptureHeap");
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 07654c0..5bfb969 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -16,7 +16,7 @@
 
 #define LOG_TAG "Camera2-Parameters"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
-// #define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -92,6 +92,26 @@
         staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
     if (!availableFpsRanges.count) return NO_INIT;
 
+    previewFpsRange[0] = availableFpsRanges.data.i32[0];
+    previewFpsRange[1] = availableFpsRanges.data.i32[1];
+
+    params.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
+            String8::format("%d,%d",
+                    previewFpsRange[0] * kFpsToApiScale,
+                    previewFpsRange[1] * kFpsToApiScale));
+
+    {
+        String8 supportedPreviewFpsRange;
+        for (size_t i=0; i < availableFpsRanges.count; i += 2) {
+            if (i != 0) supportedPreviewFpsRange += ",";
+            supportedPreviewFpsRange += String8::format("(%d,%d)",
+                    availableFpsRanges.data.i32[i] * kFpsToApiScale,
+                    availableFpsRanges.data.i32[i+1] * kFpsToApiScale);
+        }
+        params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE,
+                supportedPreviewFpsRange);
+    }
+
     previewFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
     params.set(CameraParameters::KEY_PREVIEW_FORMAT,
             formatEnumToString(previewFormat)); // NV21
@@ -159,9 +179,6 @@
                 supportedPreviewFormats);
     }
 
-    previewFpsRange[0] = availableFpsRanges.data.i32[0];
-    previewFpsRange[1] = availableFpsRanges.data.i32[1];
-
     // PREVIEW_FRAME_RATE / SUPPORTED_PREVIEW_FRAME_RATES are deprecated, but
     // still have to do something sane for them
 
@@ -170,27 +187,6 @@
     params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE,
             previewFps);
 
-    // PREVIEW_FPS_RANGE
-    // -- Order matters. Set range after single value to so that a roundtrip
-    //    of setParameters(getParameters()) would keep the FPS range in higher
-    //    order.
-    params.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
-            String8::format("%d,%d",
-                    previewFpsRange[0] * kFpsToApiScale,
-                    previewFpsRange[1] * kFpsToApiScale));
-
-    {
-        String8 supportedPreviewFpsRange;
-        for (size_t i=0; i < availableFpsRanges.count; i += 2) {
-            if (i != 0) supportedPreviewFpsRange += ",";
-            supportedPreviewFpsRange += String8::format("(%d,%d)",
-                    availableFpsRanges.data.i32[i] * kFpsToApiScale,
-                    availableFpsRanges.data.i32[i+1] * kFpsToApiScale);
-        }
-        params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE,
-                supportedPreviewFpsRange);
-    }
-
     {
         SortedVector<int32_t> sortedPreviewFrameRates;
 
@@ -470,7 +466,7 @@
                 supportedAntibanding);
     }
 
-    sceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+    sceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED;
     params.set(CameraParameters::KEY_SCENE_MODE,
             CameraParameters::SCENE_MODE_AUTO);
 
@@ -486,7 +482,7 @@
             if (addComma) supportedSceneModes += ",";
             addComma = true;
             switch (availableSceneModes.data.u8[i]) {
-                case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED:
+                case ANDROID_CONTROL_SCENE_MODE_DISABLED:
                     noSceneModes = true;
                     break;
                 case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
@@ -668,13 +664,13 @@
     focusState = ANDROID_CONTROL_AF_STATE_INACTIVE;
     shadowFocusMode = FOCUS_MODE_INVALID;
 
-    camera_metadata_ro_entry_t max3aRegions =
-        staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1);
-    if (!max3aRegions.count) return NO_INIT;
+    camera_metadata_ro_entry_t max3aRegions = staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+            Parameters::NUM_REGION, Parameters::NUM_REGION);
+    if (max3aRegions.count != Parameters::NUM_REGION) return NO_INIT;
 
     int32_t maxNumFocusAreas = 0;
     if (focusMode != Parameters::FOCUS_MODE_FIXED) {
-        maxNumFocusAreas = max3aRegions.data.i32[0];
+        maxNumFocusAreas = max3aRegions.data.i32[Parameters::REGION_AF];
     }
     params.set(CameraParameters::KEY_MAX_NUM_FOCUS_AREAS, maxNumFocusAreas);
     params.set(CameraParameters::KEY_FOCUS_AREAS,
@@ -734,7 +730,7 @@
 
     meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0));
     params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS,
-            max3aRegions.data.i32[0]);
+            max3aRegions.data.i32[Parameters::REGION_AE]);
     params.set(CameraParameters::KEY_METERING_AREAS,
             "(0,0,0,0,0)");
 
@@ -1088,7 +1084,7 @@
 status_t Parameters::set(const String8& paramString) {
     status_t res;
 
-    CameraParameters2 newParams(paramString);
+    CameraParameters newParams(paramString);
 
     // TODO: Currently ignoring any changes to supposedly read-only parameters
     // such as supported preview sizes, etc. Should probably produce an error if
@@ -1131,73 +1127,29 @@
     // RECORDING_HINT (always supported)
     validatedParams.recordingHint = boolFromString(
         newParams.get(CameraParameters::KEY_RECORDING_HINT) );
-    IF_ALOGV() { // Avoid unused variable warning
-        bool recordingHintChanged =
-                validatedParams.recordingHint != recordingHint;
-        if (recordingHintChanged) {
-            ALOGV("%s: Recording hint changed to %d",
-                  __FUNCTION__, validatedParams.recordingHint);
-        }
-    }
+    bool recordingHintChanged = validatedParams.recordingHint != recordingHint;
+    ALOGV_IF(recordingHintChanged, "%s: Recording hint changed to %d",
+            __FUNCTION__, recordingHintChanged);
 
     // PREVIEW_FPS_RANGE
+    bool fpsRangeChanged = false;
+    int32_t lastSetFpsRange[2];
 
-    /**
-     * Use the single FPS value if it was set later than the range.
-     * Otherwise, use the range value.
-     */
-    bool fpsUseSingleValue;
-    {
-        const char *fpsRange, *fpsSingle;
+    params.getPreviewFpsRange(&lastSetFpsRange[0], &lastSetFpsRange[1]);
+    lastSetFpsRange[0] /= kFpsToApiScale;
+    lastSetFpsRange[1] /= kFpsToApiScale;
 
-        fpsRange = newParams.get(CameraParameters::KEY_PREVIEW_FRAME_RATE);
-        fpsSingle = newParams.get(CameraParameters::KEY_PREVIEW_FPS_RANGE);
-
-        /**
-         * Pick either the range or the single key if only one was set.
-         *
-         * If both are set, pick the one that has greater set order.
-         */
-        if (fpsRange == NULL && fpsSingle == NULL) {
-            ALOGE("%s: FPS was not set. One of %s or %s must be set.",
-                  __FUNCTION__, CameraParameters::KEY_PREVIEW_FRAME_RATE,
-                  CameraParameters::KEY_PREVIEW_FPS_RANGE);
-            return BAD_VALUE;
-        } else if (fpsRange == NULL) {
-            fpsUseSingleValue = true;
-            ALOGV("%s: FPS range not set, using FPS single value",
-                  __FUNCTION__);
-        } else if (fpsSingle == NULL) {
-            fpsUseSingleValue = false;
-            ALOGV("%s: FPS single not set, using FPS range value",
-                  __FUNCTION__);
-        } else {
-            int fpsKeyOrder;
-            res = newParams.compareSetOrder(
-                    CameraParameters::KEY_PREVIEW_FRAME_RATE,
-                    CameraParameters::KEY_PREVIEW_FPS_RANGE,
-                    &fpsKeyOrder);
-            LOG_ALWAYS_FATAL_IF(res != OK, "Impossibly bad FPS keys");
-
-            fpsUseSingleValue = (fpsKeyOrder > 0);
-
-        }
-
-        ALOGV("%s: Preview FPS value is used from '%s'",
-              __FUNCTION__, fpsUseSingleValue ? "single" : "range");
-    }
     newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0],
             &validatedParams.previewFpsRange[1]);
-
     validatedParams.previewFpsRange[0] /= kFpsToApiScale;
     validatedParams.previewFpsRange[1] /= kFpsToApiScale;
 
-    // Ignore the FPS range if the FPS single has higher precedence
-    if (!fpsUseSingleValue) {
-        ALOGV("%s: Preview FPS range (%d, %d)", __FUNCTION__,
-                validatedParams.previewFpsRange[0],
-                validatedParams.previewFpsRange[1]);
+    // Compare the FPS range value from the last set() to the current set()
+    // to determine if the client has changed it
+    if (validatedParams.previewFpsRange[0] != lastSetFpsRange[0] ||
+            validatedParams.previewFpsRange[1] != lastSetFpsRange[1]) {
 
+        fpsRangeChanged = true;
         camera_metadata_ro_entry_t availablePreviewFpsRanges =
             staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
         for (i = 0; i < availablePreviewFpsRanges.count; i += 2) {
@@ -1248,13 +1200,14 @@
         }
     }
 
-    // PREVIEW_FRAME_RATE Deprecated
-    // - Use only if the single FPS value was set later than the FPS range
-    if (fpsUseSingleValue) {
+    // PREVIEW_FRAME_RATE Deprecated, only use if the preview fps range is
+    // unchanged this time.  The single-value FPS is the same as the minimum of
+    // the range.  To detect whether the application has changed the value of
+    // previewFps, compare against their last-set preview FPS.
+    if (!fpsRangeChanged) {
         int previewFps = newParams.getPreviewFrameRate();
-        ALOGV("%s: Preview FPS single value requested: %d",
-              __FUNCTION__, previewFps);
-        {
+        int lastSetPreviewFps = params.getPreviewFrameRate();
+        if (previewFps != lastSetPreviewFps || recordingHintChanged) {
             camera_metadata_ro_entry_t availableFrameRates =
                 staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES);
             /**
@@ -1323,35 +1276,6 @@
         }
     }
 
-    /**
-     * Update Preview FPS and Preview FPS ranges based on
-     * what we actually set.
-     *
-     * This updates the API-visible (Camera.Parameters#getParameters) values of
-     * the FPS fields, not only the internal versions.
-     *
-     * Order matters: The value that was set last takes precedence.
-     * - If the client does a setParameters(getParameters()) we retain
-     *   the same order for preview FPS.
-     */
-    if (!fpsUseSingleValue) {
-        // Set fps single, then fps range (range wins)
-        newParams.setPreviewFrameRate(
-                fpsFromRange(/*min*/validatedParams.previewFpsRange[0],
-                             /*max*/validatedParams.previewFpsRange[1]));
-        newParams.setPreviewFpsRange(
-                validatedParams.previewFpsRange[0] * kFpsToApiScale,
-                validatedParams.previewFpsRange[1] * kFpsToApiScale);
-    } else {
-        // Set fps range, then fps single (single wins)
-        newParams.setPreviewFpsRange(
-                validatedParams.previewFpsRange[0] * kFpsToApiScale,
-                validatedParams.previewFpsRange[1] * kFpsToApiScale);
-        // Set this to the same value, but with higher priority
-        newParams.setPreviewFrameRate(
-                newParams.getPreviewFrameRate());
-    }
-
     // PICTURE_SIZE
     newParams.getPictureSize(&validatedParams.pictureWidth,
             &validatedParams.pictureHeight);
@@ -1522,7 +1446,7 @@
         newParams.get(CameraParameters::KEY_SCENE_MODE) );
     if (validatedParams.sceneMode != sceneMode &&
             validatedParams.sceneMode !=
-            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) {
+            ANDROID_CONTROL_SCENE_MODE_DISABLED) {
         camera_metadata_ro_entry_t availableSceneModes =
             staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
         for (i = 0; i < availableSceneModes.count; i++) {
@@ -1537,7 +1461,7 @@
         }
     }
     bool sceneModeSet =
-            validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+            validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED;
 
     // FLASH_MODE
     if (sceneModeSet) {
@@ -1667,10 +1591,11 @@
     // FOCUS_AREAS
     res = parseAreas(newParams.get(CameraParameters::KEY_FOCUS_AREAS),
             &validatedParams.focusingAreas);
-    size_t max3aRegions =
-        (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1).data.i32[0];
+    size_t maxAfRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+              Parameters::NUM_REGION, Parameters::NUM_REGION).
+              data.i32[Parameters::REGION_AF];
     if (res == OK) res = validateAreas(validatedParams.focusingAreas,
-            max3aRegions, AREA_KIND_FOCUS);
+            maxAfRegions, AREA_KIND_FOCUS);
     if (res != OK) {
         ALOGE("%s: Requested focus areas are malformed: %s",
                 __FUNCTION__, newParams.get(CameraParameters::KEY_FOCUS_AREAS));
@@ -1700,10 +1625,13 @@
         newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
 
     // METERING_AREAS
+    size_t maxAeRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+            Parameters::NUM_REGION, Parameters::NUM_REGION).
+            data.i32[Parameters::REGION_AE];
     res = parseAreas(newParams.get(CameraParameters::KEY_METERING_AREAS),
             &validatedParams.meteringAreas);
     if (res == OK) {
-        res = validateAreas(validatedParams.meteringAreas, max3aRegions,
+        res = validateAreas(validatedParams.meteringAreas, maxAeRegions,
                             AREA_KIND_METERING);
     }
     if (res != OK) {
@@ -1852,7 +1780,7 @@
     // (face detection statistics and face priority scene mode). Map from other
     // to the other.
     bool sceneModeActive =
-            sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+            sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED;
     uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO;
     if (enableFaceDetect || sceneModeActive) {
         reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE;
@@ -1864,7 +1792,7 @@
     uint8_t reqSceneMode =
             sceneModeActive ? sceneMode :
             enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
-            (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+            (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED;
     res = request->update(ANDROID_CONTROL_SCENE_MODE,
             &reqSceneMode, 1);
     if (res != OK) return res;
@@ -1985,6 +1913,23 @@
             reqMeteringAreas, reqMeteringAreasSize);
     if (res != OK) return res;
 
+    // Set awb regions to be the same as the metering regions if allowed
+    size_t maxAwbRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+            Parameters::NUM_REGION, Parameters::NUM_REGION).
+            data.i32[Parameters::REGION_AWB];
+    if (maxAwbRegions > 0) {
+        if (maxAwbRegions >= meteringAreas.size()) {
+            res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+                    reqMeteringAreas, reqMeteringAreasSize);
+        } else {
+            // Ensure the awb regions are zeroed if the region count is too high.
+            int32_t zeroedAwbAreas[5] = {0, 0, 0, 0, 0};
+            res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+                    zeroedAwbAreas, sizeof(zeroedAwbAreas)/sizeof(int32_t));
+        }
+        if (res != OK) return res;
+    }
+
     delete[] reqMeteringAreas;
 
     /* don't include jpeg thumbnail size - it's valid for
@@ -2225,9 +2170,9 @@
 int Parameters::sceneModeStringToEnum(const char *sceneMode) {
     return
         !sceneMode ?
-            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED :
+            ANDROID_CONTROL_SCENE_MODE_DISABLED :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ?
-            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED :
+            ANDROID_CONTROL_SCENE_MODE_DISABLED :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ?
             ANDROID_CONTROL_SCENE_MODE_ACTION :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ?
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index da07ccf..60c4687 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -25,7 +25,6 @@
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
 #include <camera/CameraParameters.h>
-#include <camera/CameraParameters2.h>
 #include <camera/CameraMetadata.h>
 
 namespace android {
@@ -33,7 +32,7 @@
 
 /**
  * Current camera state; this is the full state of the Camera under the old
- * camera API (contents of the CameraParameters2 object in a more-efficient
+ * camera API (contents of the CameraParameters object in a more-efficient
  * format, plus other state). The enum values are mostly based off the
  * corresponding camera2 enums, not the camera1 strings. A few are defined here
  * if they don't cleanly map to camera2 values.
@@ -114,6 +113,14 @@
     bool autoExposureLock;
     bool autoWhiteBalanceLock;
 
+    // 3A region types, for use with ANDROID_CONTROL_MAX_REGIONS
+    enum region_t {
+        REGION_AE = 0,
+        REGION_AWB,
+        REGION_AF,
+        NUM_REGION // Number of region types
+    } region;
+
     Vector<Area> meteringAreas;
 
     int zoom;
@@ -129,7 +136,7 @@
         LIGHTFX_HDR
     } lightFx;
 
-    CameraParameters2 params;
+    CameraParameters params;
     String8 paramsFlattened;
 
     // These parameters are also part of the camera API-visible state, but not
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 77ae7ec..2064e2c 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -319,13 +319,15 @@
         // Create CPU buffer queue endpoint. We need one more buffer here so that we can
         // always acquire and free a buffer when the heap is full; otherwise the consumer
         // will have buffers in flight we'll never clear out.
-        sp<BufferQueue> bq = new BufferQueue();
-        mRecordingConsumer = new BufferItemConsumer(bq,
+        sp<IGraphicBufferProducer> producer;
+        sp<IGraphicBufferConsumer> consumer;
+        BufferQueue::createBufferQueue(&producer, &consumer);
+        mRecordingConsumer = new BufferItemConsumer(consumer,
                 GRALLOC_USAGE_HW_VIDEO_ENCODER,
                 mRecordingHeapCount + 1);
         mRecordingConsumer->setFrameAvailableListener(this);
         mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
-        mRecordingWindow = new Surface(bq);
+        mRecordingWindow = new Surface(producer);
         newConsumer = true;
         // Allocate memory later, since we don't know buffer size until receipt
     }
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 130f81a..2a2a5af 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -73,18 +73,19 @@
     }
 }
 
-void ZslProcessor::onFrameAvailable(int32_t /*requestId*/,
-        const CameraMetadata &frame) {
+void ZslProcessor::onResultAvailable(const CaptureResult &result) {
+    ATRACE_CALL();
+    ALOGV("%s:", __FUNCTION__);
     Mutex::Autolock l(mInputMutex);
     camera_metadata_ro_entry_t entry;
-    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+    entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
     nsecs_t timestamp = entry.data.i64[0];
     (void)timestamp;
     ALOGVV("Got preview frame for timestamp %" PRId64, timestamp);
 
     if (mState != RUNNING) return;
 
-    mFrameList.editItemAt(mFrameListHead) = frame;
+    mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
     mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
 
     findMatchesLocked();
@@ -130,13 +131,15 @@
 
     if (mZslConsumer == 0) {
         // Create CPU buffer queue endpoint
-        sp<BufferQueue> bq = new BufferQueue();
-        mZslConsumer = new BufferItemConsumer(bq,
+        sp<IGraphicBufferProducer> producer;
+        sp<IGraphicBufferConsumer> consumer;
+        BufferQueue::createBufferQueue(&producer, &consumer);
+        mZslConsumer = new BufferItemConsumer(consumer,
             GRALLOC_USAGE_HW_CAMERA_ZSL,
             kZslBufferDepth);
         mZslConsumer->setFrameAvailableListener(this);
         mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
-        mZslWindow = new Surface(bq);
+        mZslWindow = new Surface(producer);
     }
 
     if (mZslStreamId != NO_STREAM) {
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 6d3cb85..f4cf0c8 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -24,6 +24,7 @@
 #include <utils/Condition.h>
 #include <gui/BufferItemConsumer.h>
 #include <camera/CameraMetadata.h>
+#include <camera/CaptureResult.h>
 
 #include "common/CameraDeviceBase.h"
 #include "api1/client2/ZslProcessorInterface.h"
@@ -54,7 +55,7 @@
     // From mZslConsumer
     virtual void onFrameAvailable();
     // From FrameProcessor
-    virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+    virtual void onResultAvailable(const CaptureResult &result);
 
     virtual void onBufferReleased(buffer_handle_t *handle);
 
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 2fce2b6..1dcb718 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -63,18 +63,19 @@
     deleteStream();
 }
 
-void ZslProcessor3::onFrameAvailable(int32_t /*requestId*/,
-                                     const CameraMetadata &frame) {
+void ZslProcessor3::onResultAvailable(const CaptureResult &result) {
+    ATRACE_CALL();
+    ALOGV("%s:", __FUNCTION__);
     Mutex::Autolock l(mInputMutex);
     camera_metadata_ro_entry_t entry;
-    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+    entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
     nsecs_t timestamp = entry.data.i64[0];
     (void)timestamp;
     ALOGVV("Got preview metadata for timestamp %" PRId64, timestamp);
 
     if (mState != RUNNING) return;
 
-    mFrameList.editItemAt(mFrameListHead) = frame;
+    mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
     mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
 }
 
@@ -275,6 +276,15 @@
             return INVALID_OPERATION;
         }
 
+        // Flush device to clear out all in-flight requests pending in HAL.
+        res = client->getCameraDevice()->flush();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Failed to flush device: "
+                "%s (%d)",
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+
         // Update JPEG settings
         {
             SharedParameters::Lock l(client->getParameters());
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
index d2f8322..4c52a64 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -50,8 +50,8 @@
     ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
     ~ZslProcessor3();
 
-    // From FrameProcessor
-    virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+    // From FrameProcessor::FilteredListener
+    virtual void onResultAvailable(const CaptureResult &result);
 
     /**
      ****************************************
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 142da9e..5a48a62 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -16,7 +16,7 @@
 
 #define LOG_TAG "CameraDeviceClient"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
-// #define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
 
 #include <cutils/properties.h>
 #include <utils/Log.h>
@@ -91,79 +91,101 @@
 }
 
 status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request,
-                                         bool streaming) {
+                                         bool streaming,
+                                         /*out*/
+                                         int64_t* lastFrameNumber) {
+    List<sp<CaptureRequest> > requestList;
+    requestList.push_back(request);
+    return submitRequestList(requestList, streaming, lastFrameNumber);
+}
+
+status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests,
+                                               bool streaming, int64_t* lastFrameNumber) {
     ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
+    ALOGV("%s-start of function. Request list size %zu", __FUNCTION__, requests.size());
 
     status_t res;
-
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
     if (!mDevice.get()) return DEAD_OBJECT;
 
-    if (request == 0) {
+    if (requests.empty()) {
         ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
               __FUNCTION__, mCameraId);
         return BAD_VALUE;
     }
 
-    CameraMetadata metadata(request->mMetadata);
+    List<const CameraMetadata> metadataRequestList;
+    int32_t requestId = mRequestIdCounter;
+    uint32_t loopCounter = 0;
 
-    if (metadata.isEmpty()) {
-        ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
-               __FUNCTION__, mCameraId);
-        return BAD_VALUE;
-    } else if (request->mSurfaceList.size() == 0) {
-        ALOGE("%s: Camera %d: Requests must have at least one surface target. "
-              "Rejecting request.", __FUNCTION__, mCameraId);
-        return BAD_VALUE;
-    }
-
-    if (!enforceRequestPermissions(metadata)) {
-        // Callee logs
-        return PERMISSION_DENIED;
-    }
-
-    /**
-     * Write in the output stream IDs which we calculate from
-     * the capture request's list of surface targets
-     */
-    Vector<int32_t> outputStreamIds;
-    outputStreamIds.setCapacity(request->mSurfaceList.size());
-    for (size_t i = 0; i < request->mSurfaceList.size(); ++i) {
-        sp<Surface> surface = request->mSurfaceList[i];
-
-        if (surface == 0) continue;
-
-        sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
-        int idx = mStreamMap.indexOfKey(gbp->asBinder());
-
-        // Trying to submit request with surface that wasn't created
-        if (idx == NAME_NOT_FOUND) {
-            ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
-                  " we have not called createStream on",
-                  __FUNCTION__, mCameraId);
+    for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
+        sp<CaptureRequest> request = *it;
+        if (request == 0) {
+            ALOGE("%s: Camera %d: Sent null request.",
+                    __FUNCTION__, mCameraId);
             return BAD_VALUE;
         }
 
-        int streamId = mStreamMap.valueAt(idx);
-        outputStreamIds.push_back(streamId);
-        ALOGV("%s: Camera %d: Appending output stream %d to request",
-              __FUNCTION__, mCameraId, streamId);
+        CameraMetadata metadata(request->mMetadata);
+        if (metadata.isEmpty()) {
+            ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
+                   __FUNCTION__, mCameraId);
+            return BAD_VALUE;
+        } else if (request->mSurfaceList.isEmpty()) {
+            ALOGE("%s: Camera %d: Requests must have at least one surface target. "
+                  "Rejecting request.", __FUNCTION__, mCameraId);
+            return BAD_VALUE;
+        }
+
+        if (!enforceRequestPermissions(metadata)) {
+            // Callee logs
+            return PERMISSION_DENIED;
+        }
+
+        /**
+         * Write in the output stream IDs which we calculate from
+         * the capture request's list of surface targets
+         */
+        Vector<int32_t> outputStreamIds;
+        outputStreamIds.setCapacity(request->mSurfaceList.size());
+        for (size_t i = 0; i < request->mSurfaceList.size(); ++i) {
+            sp<Surface> surface = request->mSurfaceList[i];
+            if (surface == 0) continue;
+
+            sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
+            int idx = mStreamMap.indexOfKey(gbp->asBinder());
+
+            // Trying to submit request with surface that wasn't created
+            if (idx == NAME_NOT_FOUND) {
+                ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
+                      " we have not called createStream on",
+                      __FUNCTION__, mCameraId);
+                return BAD_VALUE;
+            }
+
+            int streamId = mStreamMap.valueAt(idx);
+            outputStreamIds.push_back(streamId);
+            ALOGV("%s: Camera %d: Appending output stream %d to request",
+                  __FUNCTION__, mCameraId, streamId);
+        }
+
+        metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
+                        outputStreamIds.size());
+
+        metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
+        loopCounter++; // loopCounter starts from 1
+        ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
+              __FUNCTION__, mCameraId, requestId, loopCounter, requests.size());
+
+        metadataRequestList.push_back(metadata);
     }
-
-    metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
-                    outputStreamIds.size());
-
-    int32_t requestId = mRequestIdCounter++;
-    metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
-    ALOGV("%s: Camera %d: Submitting request with ID %d",
-          __FUNCTION__, mCameraId, requestId);
+    mRequestIdCounter++;
 
     if (streaming) {
-        res = mDevice->setStreamingRequest(metadata);
+        res = mDevice->setStreamingRequestList(metadataRequestList, lastFrameNumber);
         if (res != OK) {
             ALOGE("%s: Camera %d:  Got error %d after trying to set streaming "
                   "request", __FUNCTION__, mCameraId, res);
@@ -171,11 +193,12 @@
             mStreamingRequestList.push_back(requestId);
         }
     } else {
-        res = mDevice->capture(metadata);
+        res = mDevice->captureList(metadataRequestList, lastFrameNumber);
         if (res != OK) {
             ALOGE("%s: Camera %d: Got error %d after trying to set capture",
-                  __FUNCTION__, mCameraId, res);
+                __FUNCTION__, mCameraId, res);
         }
+        ALOGV("%s: requestId = %d ", __FUNCTION__, requestId);
     }
 
     ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
@@ -186,7 +209,7 @@
     return res;
 }
 
-status_t CameraDeviceClient::cancelRequest(int requestId) {
+status_t CameraDeviceClient::cancelRequest(int requestId, int64_t* lastFrameNumber) {
     ATRACE_CALL();
     ALOGV("%s, requestId = %d", __FUNCTION__, requestId);
 
@@ -212,7 +235,7 @@
         return BAD_VALUE;
     }
 
-    res = mDevice->clearStreamingRequest();
+    res = mDevice->clearStreamingRequest(lastFrameNumber);
 
     if (res == OK) {
         ALOGV("%s: Camera %d: Successfully cleared streaming request",
@@ -259,8 +282,6 @@
     } else if (res == OK) {
         mStreamMap.removeItemsAt(index);
 
-        ALOGV("%s: Camera %d: Successfully deleted stream ID (%d)",
-              __FUNCTION__, mCameraId, streamId);
     }
 
     return res;
@@ -465,7 +486,7 @@
     return res;
 }
 
-status_t CameraDeviceClient::flush() {
+status_t CameraDeviceClient::flush(int64_t* lastFrameNumber) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
@@ -476,7 +497,8 @@
 
     if (!mDevice.get()) return DEAD_OBJECT;
 
-    return mDevice->flush();
+    mStreamingRequestList.clear();
+    return mDevice->flush(lastFrameNumber);
 }
 
 status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
@@ -493,13 +515,13 @@
     return dumpDevice(fd, args);
 }
 
-
-void CameraDeviceClient::notifyError() {
+void CameraDeviceClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                                     const CaptureResultExtras& resultExtras) {
     // Thread safe. Don't bother locking.
     sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
 
     if (remoteCb != 0) {
-        remoteCb->onDeviceError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE);
+        remoteCb->onDeviceError(errorCode, resultExtras);
     }
 }
 
@@ -512,12 +534,12 @@
     }
 }
 
-void CameraDeviceClient::notifyShutter(int requestId,
+void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras,
         nsecs_t timestamp) {
     // Thread safe. Don't bother locking.
     sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
     if (remoteCb != 0) {
-        remoteCb->onCaptureStarted(requestId, timestamp);
+        remoteCb->onCaptureStarted(resultExtras, timestamp);
     }
 }
 
@@ -552,16 +574,14 @@
 }
 
 /** Device-related methods */
-void CameraDeviceClient::onFrameAvailable(int32_t requestId,
-        const CameraMetadata& frame) {
+void CameraDeviceClient::onResultAvailable(const CaptureResult& result) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
     // Thread-safe. No lock necessary.
     sp<ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
     if (remoteCb != NULL) {
-        ALOGV("%s: frame = %p ", __FUNCTION__, &frame);
-        remoteCb->onResultReceived(requestId, frame);
+        remoteCb->onResultReceived(result.mMetadata, result.mResultExtras);
     }
 }
 
@@ -635,26 +655,56 @@
         return INVALID_OPERATION;
     }
 
+    camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING);
+    if (entryFacing.count == 0) {
+        ALOGE("%s: Camera %d: Can't find android.lens.facing in "
+                "static metadata!", __FUNCTION__, mCameraId);
+        return INVALID_OPERATION;
+    }
+
     int32_t& flags = *transform;
 
+    bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT);
     int orientation = entry.data.i32[0];
-    switch (orientation) {
-        case 0:
-            flags = 0;
-            break;
-        case 90:
-            flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
-            break;
-        case 180:
-            flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
-            break;
-        case 270:
-            flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
-            break;
-        default:
-            ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
-                  __FUNCTION__, orientation);
-            return INVALID_OPERATION;
+    if (!mirror) {
+        switch (orientation) {
+            case 0:
+                flags = 0;
+                break;
+            case 90:
+                flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
+                break;
+            case 180:
+                flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
+                break;
+            case 270:
+                flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
+                break;
+            default:
+                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
+                      __FUNCTION__, orientation);
+                return INVALID_OPERATION;
+        }
+    } else {
+        switch (orientation) {
+            case 0:
+                flags = HAL_TRANSFORM_FLIP_H;
+                break;
+            case 90:
+                flags = HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90;
+                break;
+            case 180:
+                flags = HAL_TRANSFORM_FLIP_V;
+                break;
+            case 270:
+                flags = HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90;
+                break;
+            default:
+                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
+                      __FUNCTION__, orientation);
+                return INVALID_OPERATION;
+        }
+
     }
 
     /**
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index b9c16aa..0b37784 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -63,9 +63,18 @@
      */
 
     // Note that the callee gets a copy of the metadata.
-    virtual int           submitRequest(sp<CaptureRequest> request,
-                                        bool streaming = false);
-    virtual status_t      cancelRequest(int requestId);
+    virtual status_t           submitRequest(sp<CaptureRequest> request,
+                                             bool streaming = false,
+                                             /*out*/
+                                             int64_t* lastFrameNumber = NULL);
+    // List of requests are copied.
+    virtual status_t           submitRequestList(List<sp<CaptureRequest> > requests,
+                                                 bool streaming = false,
+                                                 /*out*/
+                                                 int64_t* lastFrameNumber = NULL);
+    virtual status_t      cancelRequest(int requestId,
+                                        /*out*/
+                                        int64_t* lastFrameNumber = NULL);
 
     // Returns -EBUSY if device is not idle
     virtual status_t      deleteStream(int streamId);
@@ -89,7 +98,8 @@
     virtual status_t      waitUntilIdle();
 
     // Flush all active and pending requests as fast as possible
-    virtual status_t      flush();
+    virtual status_t      flush(/*out*/
+                                int64_t* lastFrameNumber = NULL);
 
     /**
      * Interface used by CameraService
@@ -114,16 +124,16 @@
      */
 
     virtual void notifyIdle();
-    virtual void notifyError();
-    virtual void notifyShutter(int requestId, nsecs_t timestamp);
+    virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                             const CaptureResultExtras& resultExtras);
+    virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp);
 
     /**
      * Interface used by independent components of CameraDeviceClient.
      */
 protected:
     /** FilteredListener implementation **/
-    virtual void          onFrameAvailable(int32_t requestId,
-                                           const CameraMetadata& frame);
+    virtual void          onResultAvailable(const CaptureResult& result);
     virtual void          detachDevice();
 
     // Calculate the ANativeWindow transform from android.sensor.orientation
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
index 1a7a7a7..0f6d278 100644
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
@@ -373,9 +373,7 @@
     Camera2ClientBase::detachDevice();
 }
 
-/** Device-related methods */
-void ProCamera2Client::onFrameAvailable(int32_t requestId,
-                                        const CameraMetadata& frame) {
+void ProCamera2Client::onResultAvailable(const CaptureResult& result) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
@@ -383,13 +381,12 @@
     SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
 
     if (mRemoteCallback != NULL) {
-        CameraMetadata tmp(frame);
+        CameraMetadata tmp(result.mMetadata);
         camera_metadata_t* meta = tmp.release();
         ALOGV("%s: meta = %p ", __FUNCTION__, meta);
-        mRemoteCallback->onResultReceived(requestId, meta);
+        mRemoteCallback->onResultReceived(result.mResultExtras.requestId, meta);
         tmp.acquire(meta);
     }
-
 }
 
 bool ProCamera2Client::enforceRequestPermissions(CameraMetadata& metadata) {
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
index 8a0f547..9d83122 100644
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.h
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
@@ -21,6 +21,7 @@
 #include "common/FrameProcessorBase.h"
 #include "common/Camera2ClientBase.h"
 #include "device2/Camera2Device.h"
+#include "camera/CaptureResult.h"
 
 namespace android {
 
@@ -97,8 +98,8 @@
 
 protected:
     /** FilteredListener implementation **/
-    virtual void          onFrameAvailable(int32_t requestId,
-                                           const CameraMetadata& frame);
+    virtual void onResultAvailable(const CaptureResult& result);
+
     virtual void          detachDevice();
 
 private:
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 6a88c87..19efd30 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -221,10 +221,11 @@
 /** Device-related methods */
 
 template <typename TClientBase>
-void Camera2ClientBase<TClientBase>::notifyError(int errorCode, int arg1,
-                                                 int arg2) {
-    ALOGE("Error condition %d reported by HAL, arguments %d, %d", errorCode,
-          arg1, arg2);
+void Camera2ClientBase<TClientBase>::notifyError(
+        ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        const CaptureResultExtras& resultExtras) {
+    ALOGE("Error condition %d reported by HAL, requestId %" PRId32, errorCode,
+          resultExtras.requestId);
 }
 
 template <typename TClientBase>
@@ -233,13 +234,13 @@
 }
 
 template <typename TClientBase>
-void Camera2ClientBase<TClientBase>::notifyShutter(int requestId,
+void Camera2ClientBase<TClientBase>::notifyShutter(const CaptureResultExtras& resultExtras,
                                                    nsecs_t timestamp) {
-    (void)requestId;
+    (void)resultExtras;
     (void)timestamp;
 
-    ALOGV("%s: Shutter notification for request id %d at time %" PRId64,
-            __FUNCTION__, requestId, timestamp);
+    ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
+            __FUNCTION__, resultExtras.requestId, timestamp);
 }
 
 template <typename TClientBase>
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 61e44f0..9feca93 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -18,6 +18,7 @@
 #define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
 
 #include "common/CameraDeviceBase.h"
+#include "camera/CaptureResult.h"
 
 namespace android {
 
@@ -61,9 +62,11 @@
      * CameraDeviceBase::NotificationListener implementation
      */
 
-    virtual void          notifyError(int errorCode, int arg1, int arg2);
+    virtual void          notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                                      const CaptureResultExtras& resultExtras);
     virtual void          notifyIdle();
-    virtual void          notifyShutter(int requestId, nsecs_t timestamp);
+    virtual void          notifyShutter(const CaptureResultExtras& resultExtras,
+                                        nsecs_t timestamp);
     virtual void          notifyAutoFocus(uint8_t newState, int triggerId);
     virtual void          notifyAutoExposure(uint8_t newState, int triggerId);
     virtual void          notifyAutoWhitebalance(uint8_t newState,
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index e80abf1..7597b10 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -22,9 +22,12 @@
 #include <utils/String16.h>
 #include <utils/Vector.h>
 #include <utils/Timers.h>
+#include <utils/List.h>
 
+#include <camera/camera2/ICameraDeviceCallbacks.h>
 #include "hardware/camera2.h"
 #include "camera/CameraMetadata.h"
+#include "camera/CaptureResult.h"
 
 namespace android {
 
@@ -44,7 +47,7 @@
     virtual status_t initialize(camera_module_t *module) = 0;
     virtual status_t disconnect() = 0;
 
-    virtual status_t dump(int fd, const Vector<String16>& args) = 0;
+    virtual status_t dump(int fd, const Vector<String16> &args) = 0;
 
     /**
      * The device's static characteristics metadata buffer
@@ -54,19 +57,37 @@
     /**
      * Submit request for capture. The CameraDevice takes ownership of the
      * passed-in buffer.
+     * Output lastFrameNumber is the expected frame number of this request.
      */
-    virtual status_t capture(CameraMetadata &request) = 0;
+    virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) = 0;
+
+    /**
+     * Submit a list of requests.
+     * Output lastFrameNumber is the expected last frame number of the list of requests.
+     */
+    virtual status_t captureList(const List<const CameraMetadata> &requests,
+                                 int64_t *lastFrameNumber = NULL) = 0;
 
     /**
      * Submit request for streaming. The CameraDevice makes a copy of the
      * passed-in buffer and the caller retains ownership.
+     * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
-    virtual status_t setStreamingRequest(const CameraMetadata &request) = 0;
+    virtual status_t setStreamingRequest(const CameraMetadata &request,
+                                         int64_t *lastFrameNumber = NULL) = 0;
+
+    /**
+     * Submit a list of requests for streaming.
+     * Output lastFrameNumber is the last frame number of the previous streaming request.
+     */
+    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                             int64_t *lastFrameNumber = NULL) = 0;
 
     /**
      * Clear the streaming request slot.
+     * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
-    virtual status_t clearStreamingRequest() = 0;
+    virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) = 0;
 
     /**
      * Wait until a request with the given ID has been dequeued by the
@@ -142,11 +163,12 @@
         // API1 and API2.
 
         // Required for API 1 and 2
-        virtual void notifyError(int errorCode, int arg1, int arg2) = 0;
+        virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                                 const CaptureResultExtras &resultExtras) = 0;
 
         // Required only for API2
         virtual void notifyIdle() = 0;
-        virtual void notifyShutter(int requestId,
+        virtual void notifyShutter(const CaptureResultExtras &resultExtras,
                 nsecs_t timestamp) = 0;
 
         // Required only for API1
@@ -179,11 +201,12 @@
     virtual status_t waitForNextFrame(nsecs_t timeout) = 0;
 
     /**
-     * Get next metadata frame from the frame queue. Returns NULL if the queue
-     * is empty; caller takes ownership of the metadata buffer.
-     * May be called concurrently to most methods, except for waitForNextFrame
+     * Get next capture result frame from the result queue. Returns NOT_ENOUGH_DATA
+     * if the queue is empty; caller takes ownership of the metadata buffer inside
+     * the capture result object's metadata field.
+     * May be called concurrently to most methods, except for waitForNextFrame.
      */
-    virtual status_t getNextFrame(CameraMetadata *frame) = 0;
+    virtual status_t getNextResult(CaptureResult *frame) = 0;
 
     /**
      * Trigger auto-focus. The latest ID used in a trigger autofocus or cancel
@@ -224,8 +247,9 @@
     /**
      * Flush all pending and in-flight requests. Blocks until flush is
      * complete.
+     * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
-    virtual status_t flush() = 0;
+    virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0;
 
 };
 
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 4d31667..f6a971a 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -99,15 +99,17 @@
 void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) {
     status_t res;
     ATRACE_CALL();
-    CameraMetadata frame;
+    CaptureResult result;
 
     ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());
 
-    while ( (res = device->getNextFrame(&frame)) == OK) {
+    while ( (res = device->getNextResult(&result)) == OK) {
 
+        // TODO: instead of getting frame number from metadata, we should read
+        // this from result.mResultExtras when CameraDeviceBase interface is fixed.
         camera_metadata_entry_t entry;
 
-        entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+        entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
         if (entry.count == 0) {
             ALOGE("%s: Camera %d: Error reading frame number",
                     __FUNCTION__, device->getId());
@@ -115,13 +117,13 @@
         }
         ATRACE_INT("cam2_frame", entry.data.i32[0]);
 
-        if (!processSingleFrame(frame, device)) {
+        if (!processSingleFrame(result, device)) {
             break;
         }
 
-        if (!frame.isEmpty()) {
+        if (!result.mMetadata.isEmpty()) {
             Mutex::Autolock al(mLastFrameMutex);
-            mLastFrame.acquire(frame);
+            mLastFrame.acquire(result.mMetadata);
         }
     }
     if (res != NOT_ENOUGH_DATA) {
@@ -133,21 +135,22 @@
     return;
 }
 
-bool FrameProcessorBase::processSingleFrame(CameraMetadata &frame,
-                                           const sp<CameraDeviceBase> &device) {
+bool FrameProcessorBase::processSingleFrame(CaptureResult &result,
+                                            const sp<CameraDeviceBase> &device) {
     ALOGV("%s: Camera %d: Process single frame (is empty? %d)",
-          __FUNCTION__, device->getId(), frame.isEmpty());
-    return processListeners(frame, device) == OK;
+          __FUNCTION__, device->getId(), result.mMetadata.isEmpty());
+    return processListeners(result, device) == OK;
 }
 
-status_t FrameProcessorBase::processListeners(const CameraMetadata &frame,
+status_t FrameProcessorBase::processListeners(const CaptureResult &result,
         const sp<CameraDeviceBase> &device) {
     ATRACE_CALL();
+
     camera_metadata_ro_entry_t entry;
 
     // Quirks: Don't deliver partial results to listeners that don't want them
     bool quirkIsPartial = false;
-    entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+    entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
     if (entry.count != 0 &&
             entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
         ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
@@ -155,10 +158,13 @@
         quirkIsPartial = true;
     }
 
-    entry = frame.find(ANDROID_REQUEST_ID);
+    // TODO: instead of getting requestID from CameraMetadata, we should get it
+    // from CaptureResultExtras. This will require changing Camera2Device.
+    // Currently Camera2Device uses MetadataQueue to store results, which does not
+    // include CaptureResultExtras.
+    entry = result.mMetadata.find(ANDROID_REQUEST_ID);
     if (entry.count == 0) {
-        ALOGE("%s: Camera %d: Error reading frame id",
-                __FUNCTION__, device->getId());
+        ALOGE("%s: Camera %d: Error reading frame id", __FUNCTION__, device->getId());
         return BAD_VALUE;
     }
     int32_t requestId = entry.data.i32[0];
@@ -169,9 +175,8 @@
 
         List<RangeListener>::iterator item = mRangeListeners.begin();
         while (item != mRangeListeners.end()) {
-            if (requestId >= item->minId &&
-                    requestId < item->maxId &&
-                    (!quirkIsPartial || item->quirkSendPartials) ) {
+            if (requestId >= item->minId && requestId < item->maxId &&
+                    (!quirkIsPartial || item->quirkSendPartials)) {
                 sp<FilteredListener> listener = item->listener.promote();
                 if (listener == 0) {
                     item = mRangeListeners.erase(item);
@@ -183,10 +188,12 @@
             item++;
         }
     }
-    ALOGV("Got %zu range listeners out of %zu", listeners.size(), mRangeListeners.size());
+    ALOGV("%s: Camera %d: Got %zu range listeners out of %zu", __FUNCTION__,
+          device->getId(), listeners.size(), mRangeListeners.size());
+
     List<sp<FilteredListener> >::iterator item = listeners.begin();
     for (; item != listeners.end(); item++) {
-        (*item)->onFrameAvailable(requestId, frame);
+        (*item)->onResultAvailable(result);
     }
     return OK;
 }
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 89b608a..15a014e 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -23,6 +23,7 @@
 #include <utils/KeyedVector.h>
 #include <utils/List.h>
 #include <camera/CameraMetadata.h>
+#include <camera/CaptureResult.h>
 
 namespace android {
 
@@ -39,8 +40,7 @@
     virtual ~FrameProcessorBase();
 
     struct FilteredListener: virtual public RefBase {
-        virtual void onFrameAvailable(int32_t requestId,
-                                      const CameraMetadata &frame) = 0;
+        virtual void onResultAvailable(const CaptureResult &result) = 0;
     };
 
     // Register a listener for a range of IDs [minId, maxId). Multiple listeners
@@ -72,10 +72,10 @@
 
     void processNewFrames(const sp<CameraDeviceBase> &device);
 
-    virtual bool processSingleFrame(CameraMetadata &frame,
+    virtual bool processSingleFrame(CaptureResult &result,
                                     const sp<CameraDeviceBase> &device);
 
-    status_t processListeners(const CameraMetadata &frame,
+    status_t processListeners(const CaptureResult &result,
                               const sp<CameraDeviceBase> &device);
 
     CameraMetadata mLastFrame;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index 2966d82..c33c166 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -112,20 +112,6 @@
         return res;
     }
 
-    res = device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps);
-    if (res != OK ) {
-        ALOGE("%s: Camera %d: Unable to retrieve tag ops from device: %s (%d)",
-                __FUNCTION__, mId, strerror(-res), res);
-        device->common.close(&device->common);
-        return res;
-    }
-    res = set_camera_metadata_vendor_tag_ops(mVendorTagOps);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set tag ops: %s (%d)",
-            __FUNCTION__, mId, strerror(-res), res);
-        device->common.close(&device->common);
-        return res;
-    }
     res = device->ops->set_notify_callback(device, notificationCallback,
             NULL);
     if (res != OK) {
@@ -213,7 +199,7 @@
     return mDeviceInfo;
 }
 
-status_t Camera2Device::capture(CameraMetadata &request) {
+status_t Camera2Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
 
@@ -221,15 +207,29 @@
     return OK;
 }
 
+status_t Camera2Device::captureList(const List<const CameraMetadata> &requests,
+                                    int64_t* /*lastFrameNumber*/) {
+    ATRACE_CALL();
+    ALOGE("%s: Camera2Device burst capture not implemented", __FUNCTION__);
+    return INVALID_OPERATION;
+}
 
-status_t Camera2Device::setStreamingRequest(const CameraMetadata &request) {
+status_t Camera2Device::setStreamingRequest(const CameraMetadata &request,
+                                            int64_t* /*lastFrameNumber*/) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
     CameraMetadata streamRequest(request);
     return mRequestQueue.setStreamSlot(streamRequest.release());
 }
 
-status_t Camera2Device::clearStreamingRequest() {
+status_t Camera2Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                                int64_t* /*lastFrameNumber*/) {
+    ATRACE_CALL();
+    ALOGE("%s: Camera2Device streaming burst not implemented", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
+status_t Camera2Device::clearStreamingRequest(int64_t* /*lastFrameNumber*/) {
     ATRACE_CALL();
     return mRequestQueue.setStreamSlot(NULL);
 }
@@ -462,7 +462,13 @@
     if (listener != NULL) {
         switch (msg_type) {
             case CAMERA2_MSG_ERROR:
-                listener->notifyError(ext1, ext2, ext3);
+                // TODO: This needs to be fixed. ext2 and ext3 need to be considered.
+                listener->notifyError(
+                        ((ext1 == CAMERA2_MSG_ERROR_DEVICE)
+                        || (ext1 == CAMERA2_MSG_ERROR_HARDWARE)) ?
+                                ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE :
+                                ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE,
+                        CaptureResultExtras());
                 break;
             case CAMERA2_MSG_SHUTTER: {
                 // TODO: Only needed for camera2 API, which is unsupported
@@ -491,16 +497,22 @@
     return mFrameQueue.waitForBuffer(timeout);
 }
 
-status_t Camera2Device::getNextFrame(CameraMetadata *frame) {
+status_t Camera2Device::getNextResult(CaptureResult *result) {
     ATRACE_CALL();
+    ALOGV("%s: get CaptureResult", __FUNCTION__);
+    if (result == NULL) {
+        ALOGE("%s: result pointer is NULL", __FUNCTION__);
+        return BAD_VALUE;
+    }
     status_t res;
     camera_metadata_t *rawFrame;
     res = mFrameQueue.dequeue(&rawFrame);
-    if (rawFrame  == NULL) {
+    if (rawFrame == NULL) {
         return NOT_ENOUGH_DATA;
     } else if (res == OK) {
-        frame->acquire(rawFrame);
+        result->mMetadata.acquire(rawFrame);
     }
+
     return res;
 }
 
@@ -570,7 +582,7 @@
     return res;
 }
 
-status_t Camera2Device::flush() {
+status_t Camera2Device::flush(int64_t* /*lastFrameNumber*/) {
     ATRACE_CALL();
 
     mRequestQueue.clear();
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 1f53c56..22a13ac 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -47,9 +47,14 @@
     virtual status_t disconnect();
     virtual status_t dump(int fd, const Vector<String16>& args);
     virtual const CameraMetadata& info() const;
-    virtual status_t capture(CameraMetadata &request);
-    virtual status_t setStreamingRequest(const CameraMetadata &request);
-    virtual status_t clearStreamingRequest();
+    virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL);
+    virtual status_t captureList(const List<const CameraMetadata> &requests,
+                                 int64_t *lastFrameNumber = NULL);
+    virtual status_t setStreamingRequest(const CameraMetadata &request,
+                                         int64_t *lastFrameNumber = NULL);
+    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                             int64_t *lastFrameNumber = NULL);
+    virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
     virtual status_t createStream(sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format, size_t size,
@@ -65,20 +70,19 @@
     virtual status_t setNotifyCallback(NotificationListener *listener);
     virtual bool     willNotify3A();
     virtual status_t waitForNextFrame(nsecs_t timeout);
-    virtual status_t getNextFrame(CameraMetadata *frame);
+    virtual status_t getNextResult(CaptureResult *frame);
     virtual status_t triggerAutofocus(uint32_t id);
     virtual status_t triggerCancelAutofocus(uint32_t id);
     virtual status_t triggerPrecaptureMetering(uint32_t id);
     virtual status_t pushReprocessBuffer(int reprocessStreamId,
             buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
     // Flush implemented as just a wait
-    virtual status_t flush();
+    virtual status_t flush(int64_t *lastFrameNumber = NULL);
   private:
     const int mId;
     camera2_device_t *mHal2Device;
 
     CameraMetadata mDeviceInfo;
-    vendor_tag_query_ops_t *mVendorTagOps;
 
     /**
      * Queue class for both sending requests to a camera2 device, and for
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 1d4768c..2f874f5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -102,8 +102,10 @@
 
     camera3_device_t *device;
 
+    ATRACE_BEGIN("camera3->open");
     res = module->common.methods->open(&module->common, deviceName.string(),
             reinterpret_cast<hw_device_t**>(&device));
+    ATRACE_END();
 
     if (res != OK) {
         SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res);
@@ -112,9 +114,9 @@
 
     /** Cross-check device version */
 
-    if (device->common.version != CAMERA_DEVICE_API_VERSION_3_0) {
+    if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) {
         SET_ERR_L("Could not open camera: "
-                "Camera device is not version %x, reports %x instead",
+                "Camera device should be at least %x, reports %x instead",
                 CAMERA_DEVICE_API_VERSION_3_0,
                 device->common.version);
         device->common.close(&device->common);
@@ -128,7 +130,7 @@
     if (info.device_version != device->common.version) {
         SET_ERR_L("HAL reporting mismatched camera_info version (%x)"
                 " and device version (%x).",
-                device->common.version, info.device_version);
+                info.device_version, device->common.version);
         device->common.close(&device->common);
         return BAD_VALUE;
     }
@@ -146,24 +148,6 @@
         return BAD_VALUE;
     }
 
-    /** Get vendor metadata tags */
-
-    mVendorTagOps.get_camera_vendor_section_name = NULL;
-
-    ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
-    device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps);
-    ATRACE_END();
-
-    if (mVendorTagOps.get_camera_vendor_section_name != NULL) {
-        res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps);
-        if (res != OK) {
-            SET_ERR_L("Unable to set tag ops: %s (%d)",
-                    strerror(-res), res);
-            device->common.close(&device->common);
-            return res;
-        }
-    }
-
     /** Start up status tracker thread */
     mStatusTracker = new StatusTracker(this);
     res = mStatusTracker->run(String8::format("C3Dev-%d-Status", mId).string());
@@ -271,7 +255,9 @@
         mStatusTracker.clear();
 
         if (mHal3Device != NULL) {
+            ATRACE_BEGIN("camera3->close");
             mHal3Device->common.close(&mHal3Device->common);
+            ATRACE_END();
             mHal3Device = NULL;
         }
 
@@ -298,6 +284,53 @@
     return gotLock;
 }
 
+ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
+    // TODO: replace below with availableStreamConfiguration for HAL3.2+.
+    camera_metadata_ro_entry availableJpegSizes =
+            mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
+    if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) {
+        ALOGE("%s: Camera %d: Can't find valid available jpeg sizes in static metadata!",
+                __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+
+    // Get max jpeg size (area-wise).
+    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
+    bool foundMax = false;
+    for (size_t i = 0; i < availableJpegSizes.count; i += 2) {
+        if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1])
+                > (maxJpegWidth * maxJpegHeight)) {
+            maxJpegWidth = availableJpegSizes.data.i32[i];
+            maxJpegHeight = availableJpegSizes.data.i32[i + 1];
+            foundMax = true;
+        }
+    }
+    if (!foundMax) {
+        return BAD_VALUE;
+    }
+
+    // Get max jpeg buffer size
+    ssize_t maxJpegBufferSize = 0;
+    camera_metadata_ro_entry jpegMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+    if (jpegMaxSize.count == 0) {
+        ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+    maxJpegBufferSize = jpegMaxSize.data.i32[0];
+
+    // Calculate final jpeg buffer size for the given resolution.
+    float scaleFactor = ((float) (width * height)) / (maxJpegWidth * maxJpegHeight);
+    ssize_t jpegBufferSize = scaleFactor * maxJpegBufferSize;
+    // Bound the buffer size to [MIN_JPEG_BUFFER_SIZE, maxJpegBufferSize].
+    if (jpegBufferSize > maxJpegBufferSize) {
+        jpegBufferSize = maxJpegBufferSize;
+    } else if (jpegBufferSize < kMinJpegBufferSize) {
+        jpegBufferSize = kMinJpegBufferSize;
+    }
+
+    return jpegBufferSize;
+}
+
 status_t Camera3Device::dump(int fd, const Vector<String16> &args) {
     ATRACE_CALL();
     (void)args;
@@ -386,14 +419,7 @@
     return mDeviceInfo;
 }
 
-status_t Camera3Device::capture(CameraMetadata &request) {
-    ATRACE_CALL();
-    status_t res;
-    Mutex::Autolock il(mInterfaceLock);
-    Mutex::Autolock l(mLock);
-
-    // TODO: take ownership of the request
-
+status_t Camera3Device::checkStatusOkToCaptureLocked() {
     switch (mStatus) {
         case STATUS_ERROR:
             CLOGE("Device has encountered a serious error");
@@ -402,7 +428,6 @@
             CLOGE("Device not initialized");
             return INVALID_OPERATION;
         case STATUS_UNCONFIGURED:
-            // May be lazily configuring streams, will check during setup
         case STATUS_CONFIGURED:
         case STATUS_ACTIVE:
             // OK
@@ -411,71 +436,119 @@
             SET_ERR_L("Unexpected status: %d", mStatus);
             return INVALID_OPERATION;
     }
+    return OK;
+}
 
-    sp<CaptureRequest> newRequest = setUpRequestLocked(request);
-    if (newRequest == NULL) {
-        CLOGE("Can't create capture request");
+status_t Camera3Device::convertMetadataListToRequestListLocked(
+        const List<const CameraMetadata> &metadataList, RequestList *requestList) {
+    if (requestList == NULL) {
+        CLOGE("requestList cannot be NULL.");
         return BAD_VALUE;
     }
 
-    res = mRequestThread->queueRequest(newRequest);
-    if (res == OK) {
-        waitUntilStateThenRelock(/*active*/ true, kActiveTimeout);
-        if (res != OK) {
-            SET_ERR_L("Can't transition to active in %f seconds!",
-                    kActiveTimeout/1e9);
+    int32_t burstId = 0;
+    for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
+            it != metadataList.end(); ++it) {
+        sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+        if (newRequest == 0) {
+            CLOGE("Can't create capture request");
+            return BAD_VALUE;
         }
-        ALOGV("Camera %d: Capture request enqueued", mId);
+
+        // Setup burst Id and request Id
+        newRequest->mResultExtras.burstId = burstId++;
+        if (it->exists(ANDROID_REQUEST_ID)) {
+            if (it->find(ANDROID_REQUEST_ID).count == 0) {
+                CLOGE("RequestID entry exists; but must not be empty in metadata");
+                return BAD_VALUE;
+            }
+            newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+        } else {
+            CLOGE("RequestID does not exist in metadata");
+            return BAD_VALUE;
+        }
+
+        requestList->push_back(newRequest);
+
+        ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
     }
-    return res;
+    return OK;
 }
 
-
-status_t Camera3Device::setStreamingRequest(const CameraMetadata &request) {
+status_t Camera3Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
     ATRACE_CALL();
-    status_t res;
+
+    List<const CameraMetadata> requests;
+    requests.push_back(request);
+    return captureList(requests, /*lastFrameNumber*/NULL);
+}
+
+status_t Camera3Device::submitRequestsHelper(
+        const List<const CameraMetadata> &requests, bool repeating,
+        /*out*/
+        int64_t *lastFrameNumber) {
+    ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
-    switch (mStatus) {
-        case STATUS_ERROR:
-            CLOGE("Device has encountered a serious error");
-            return INVALID_OPERATION;
-        case STATUS_UNINITIALIZED:
-            CLOGE("Device not initialized");
-            return INVALID_OPERATION;
-        case STATUS_UNCONFIGURED:
-            // May be lazily configuring streams, will check during setup
-        case STATUS_CONFIGURED:
-        case STATUS_ACTIVE:
-            // OK
-            break;
-        default:
-            SET_ERR_L("Unexpected status: %d", mStatus);
-            return INVALID_OPERATION;
+    status_t res = checkStatusOkToCaptureLocked();
+    if (res != OK) {
+        // error logged by previous call
+        return res;
     }
 
-    sp<CaptureRequest> newRepeatingRequest = setUpRequestLocked(request);
-    if (newRepeatingRequest == NULL) {
-        CLOGE("Can't create repeating request");
-        return BAD_VALUE;
+    RequestList requestList;
+
+    res = convertMetadataListToRequestListLocked(requests, /*out*/&requestList);
+    if (res != OK) {
+        // error logged by previous call
+        return res;
     }
 
-    RequestList newRepeatingRequests;
-    newRepeatingRequests.push_back(newRepeatingRequest);
+    if (repeating) {
+        res = mRequestThread->setRepeatingRequests(requestList, lastFrameNumber);
+    } else {
+        res = mRequestThread->queueRequestList(requestList, lastFrameNumber);
+    }
 
-    res = mRequestThread->setRepeatingRequests(newRepeatingRequests);
     if (res == OK) {
-        waitUntilStateThenRelock(/*active*/ true, kActiveTimeout);
+        waitUntilStateThenRelock(/*active*/true, kActiveTimeout);
         if (res != OK) {
             SET_ERR_L("Can't transition to active in %f seconds!",
                     kActiveTimeout/1e9);
         }
-        ALOGV("Camera %d: Repeating request set", mId);
+        ALOGV("Camera %d: Capture request %" PRId32 " enqueued", mId,
+              (*(requestList.begin()))->mResultExtras.requestId);
+    } else {
+        CLOGE("Cannot queue request. Impossible.");
+        return BAD_VALUE;
     }
+
     return res;
 }
 
+status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+                                    int64_t *lastFrameNumber) {
+    ATRACE_CALL();
+
+    return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+}
+
+status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
+                                            int64_t* /*lastFrameNumber*/) {
+    ATRACE_CALL();
+
+    List<const CameraMetadata> requests;
+    requests.push_back(request);
+    return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+}
+
+status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                                int64_t *lastFrameNumber) {
+    ATRACE_CALL();
+
+    return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+}
 
 sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
         const CameraMetadata &request) {
@@ -497,7 +570,7 @@
     return newRequest;
 }
 
-status_t Camera3Device::clearStreamingRequest() {
+status_t Camera3Device::clearStreamingRequest(int64_t *lastFrameNumber) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
@@ -519,7 +592,8 @@
             return INVALID_OPERATION;
     }
     ALOGV("Camera %d: Clearing repeating request", mId);
-    return mRequestThread->clearRepeatingRequests();
+
+    return mRequestThread->clearRepeatingRequests(lastFrameNumber);
 }
 
 status_t Camera3Device::waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) {
@@ -714,8 +788,17 @@
 
     sp<Camera3OutputStream> newStream;
     if (format == HAL_PIXEL_FORMAT_BLOB) {
+        ssize_t jpegBufferSize = getJpegBufferSize(width, height);
+        if (jpegBufferSize > 0) {
+            ALOGV("%s: Overwrite Jpeg output buffer size from %zu to %zd",
+                    __FUNCTION__, size, jpegBufferSize);
+        } else {
+            SET_ERR_L("Invalid jpeg buffer size %zd", jpegBufferSize);
+            return BAD_VALUE;
+        }
+
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, size, format);
+                width, height, jpegBufferSize, format);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
                 width, height, format);
@@ -840,16 +923,20 @@
     }
 
     sp<Camera3StreamInterface> deletedStream;
+    ssize_t outputStreamIdx = mOutputStreams.indexOfKey(id);
     if (mInputStream != NULL && id == mInputStream->getId()) {
         deletedStream = mInputStream;
         mInputStream.clear();
     } else {
-        ssize_t idx = mOutputStreams.indexOfKey(id);
-        if (idx == NAME_NOT_FOUND) {
+        if (outputStreamIdx == NAME_NOT_FOUND) {
             CLOGE("Stream %d does not exist", id);
             return BAD_VALUE;
         }
-        deletedStream = mOutputStreams.editValueAt(idx);
+    }
+
+    // Delete output stream or the output part of a bi-directional stream.
+    if (outputStreamIdx != NAME_NOT_FOUND) {
+        deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
         mOutputStreams.removeItem(id);
     }
 
@@ -918,6 +1005,10 @@
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
+    return waitUntilDrainedLocked();
+}
+
+status_t Camera3Device::waitUntilDrainedLocked() {
     switch (mStatus) {
         case STATUS_UNINITIALIZED:
         case STATUS_UNCONFIGURED:
@@ -1030,7 +1121,7 @@
     return OK;
 }
 
-status_t Camera3Device::getNextFrame(CameraMetadata *frame) {
+status_t Camera3Device::getNextResult(CaptureResult *frame) {
     ATRACE_CALL();
     Mutex::Autolock l(mOutputLock);
 
@@ -1038,8 +1129,14 @@
         return NOT_ENOUGH_DATA;
     }
 
-    CameraMetadata &result = *(mResultQueue.begin());
-    frame->acquire(result);
+    if (frame == NULL) {
+        ALOGE("%s: argument cannot be NULL", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    CaptureResult &result = *(mResultQueue.begin());
+    frame->mResultExtras = result.mResultExtras;
+    frame->mMetadata.acquire(result.mMetadata);
     mResultQueue.erase(mResultQueue.begin());
 
     return OK;
@@ -1117,14 +1214,25 @@
     return INVALID_OPERATION;
 }
 
-status_t Camera3Device::flush() {
+status_t Camera3Device::flush(int64_t *frameNumber) {
     ATRACE_CALL();
     ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId);
     Mutex::Autolock il(mInterfaceLock);
-    Mutex::Autolock l(mLock);
 
-    mRequestThread->clear();
-    return mHal3Device->ops->flush(mHal3Device);
+    {
+        Mutex::Autolock l(mLock);
+        mRequestThread->clear(/*out*/frameNumber);
+    }
+
+    status_t res;
+    if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
+        res = mHal3Device->ops->flush(mHal3Device);
+    } else {
+        Mutex::Autolock l(mLock);
+        res = waitUntilDrainedLocked();
+    }
+
+    return res;
 }
 
 /**
@@ -1392,13 +1500,13 @@
  * In-flight request management
  */
 
-status_t Camera3Device::registerInFlight(int32_t frameNumber,
-        int32_t requestId, int32_t numBuffers) {
+status_t Camera3Device::registerInFlight(uint32_t frameNumber,
+        int32_t numBuffers, CaptureResultExtras resultExtras) {
     ATRACE_CALL();
     Mutex::Autolock l(mInFlightLock);
 
     ssize_t res;
-    res = mInFlightMap.add(frameNumber, InFlightRequest(requestId, numBuffers));
+    res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras));
     if (res < 0) return res;
 
     return OK;
@@ -1410,8 +1518,8 @@
  * to the output frame queue
  */
 bool Camera3Device::processPartial3AQuirk(
-        int32_t frameNumber, int32_t requestId,
-        const CameraMetadata& partial) {
+        uint32_t frameNumber,
+        const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
 
     // Check if all 3A states are present
     // The full list of fields is
@@ -1460,7 +1568,7 @@
     ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
         "AF state %d, AE state %d, AWB state %d, "
         "AF trigger %d, AE precapture trigger %d",
-        __FUNCTION__, mId, frameNumber, requestId,
+        __FUNCTION__, mId, frameNumber, resultExtras.requestId,
         afMode, awbMode,
         afState, aeState, awbState,
         afTriggerId, aeTriggerId);
@@ -1475,58 +1583,63 @@
 
     Mutex::Autolock l(mOutputLock);
 
-    CameraMetadata& min3AResult =
-            *mResultQueue.insert(
-                mResultQueue.end(),
-                CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0));
+    CaptureResult captureResult;
+    captureResult.mResultExtras = resultExtras;
+    captureResult.mMetadata = CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0);
+    // TODO: change this to sp<CaptureResult>. This will need other changes, including,
+    // but not limited to CameraDeviceBase::getNextResult
+    CaptureResult& min3AResult =
+            *mResultQueue.insert(mResultQueue.end(), captureResult);
 
-    if (!insert3AResult(min3AResult, ANDROID_REQUEST_FRAME_COUNT,
-            &frameNumber, frameNumber)) {
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_FRAME_COUNT,
+            // TODO: This is problematic casting. Need to fix CameraMetadata.
+            reinterpret_cast<int32_t*>(&frameNumber), frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_REQUEST_ID,
+    int32_t requestId = resultExtras.requestId;
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_ID,
             &requestId, frameNumber)) {
         return false;
     }
 
     static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
-    if (!insert3AResult(min3AResult, ANDROID_QUIRKS_PARTIAL_RESULT,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
             &partialResult, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_MODE,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
             &afMode, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_MODE,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_MODE,
             &awbMode, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_STATE,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_STATE,
             &aeState, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_STATE,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_STATE,
             &afState, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_STATE,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_STATE,
             &awbState, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_TRIGGER_ID,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID,
             &afTriggerId, frameNumber)) {
         return false;
     }
 
-    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
             &aeTriggerId, frameNumber)) {
         return false;
     }
@@ -1538,7 +1651,7 @@
 
 template<typename T>
 bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
-        T* value, int32_t frameNumber) {
+        T* value, uint32_t frameNumber) {
     (void) frameNumber;
 
     camera_metadata_ro_entry_t entry;
@@ -1563,7 +1676,7 @@
 
 template<typename T>
 bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
-        const T* value, int32_t frameNumber) {
+        const T* value, uint32_t frameNumber) {
     if (result.update(tag, value, 1) != NO_ERROR) {
         mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
         SET_ERR("Frame %d: Failed to set %s in partial metadata",
@@ -1590,11 +1703,12 @@
     }
     bool partialResultQuirk = false;
     CameraMetadata collectedQuirkResult;
+    CaptureResultExtras resultExtras;
 
-    // Get capture timestamp from list of in-flight requests, where it was added
-    // by the shutter notification for this frame. Then update the in-flight
-    // status and remove the in-flight entry if all result data has been
-    // received.
+    // Get capture timestamp and resultExtras from list of in-flight requests,
+    // where it was added by the shutter notification for this frame.
+    // Then update the in-flight status and remove the in-flight entry if
+    // all result data has been received.
     nsecs_t timestamp = 0;
     {
         Mutex::Autolock l(mInFlightLock);
@@ -1605,6 +1719,10 @@
             return;
         }
         InFlightRequest &request = mInFlightMap.editValueAt(idx);
+        ALOGVV("%s: got InFlightRequest requestId = %" PRId32 ", frameNumber = %" PRId64
+                ", burstId = %" PRId32,
+                __FUNCTION__, request.resultExtras.requestId, request.resultExtras.frameNumber,
+                request.resultExtras.burstId);
 
         // Check if this result carries only partial metadata
         if (mUsePartialResultQuirk && result->result != NULL) {
@@ -1626,13 +1744,15 @@
                 if (!request.partialResultQuirk.haveSent3A) {
                     request.partialResultQuirk.haveSent3A =
                             processPartial3AQuirk(frameNumber,
-                                    request.requestId,
-                                    request.partialResultQuirk.collectedResult);
+                                    request.partialResultQuirk.collectedResult,
+                                    request.resultExtras);
                 }
             }
         }
 
         timestamp = request.captureTimestamp;
+        resultExtras = request.resultExtras;
+
         /**
          * One of the following must happen before it's legal to call process_capture_result,
          * unless partial metadata is being provided:
@@ -1668,8 +1788,10 @@
             return;
         }
 
-        // Check if everything has arrived for this result (buffers and metadata)
-        if (request.haveResultMetadata && request.numBuffersLeft == 0) {
+        // Check if everything has arrived for this result (buffers and metadata), remove it from
+        // InFlightMap if both arrived or HAL reports error for this request (i.e. during flush).
+        if ((request.requestStatus != OK) ||
+                (request.haveResultMetadata && request.numBuffersLeft == 0)) {
             ATRACE_ASYNC_END("frame capture", frameNumber);
             mInFlightMap.removeItemsAt(idx, 1);
         }
@@ -1689,19 +1811,21 @@
 
         gotResult = true;
 
-        if (frameNumber != mNextResultFrameNumber) {
+        // TODO: need to track errors for tighter bounds on expected frame number
+        if (frameNumber < mNextResultFrameNumber) {
             SET_ERR("Out-of-order capture result metadata submitted! "
                     "(got frame number %d, expecting %d)",
                     frameNumber, mNextResultFrameNumber);
             return;
         }
-        mNextResultFrameNumber++;
+        mNextResultFrameNumber = frameNumber + 1;
 
-        CameraMetadata captureResult;
-        captureResult = result->result;
+        CaptureResult captureResult;
+        captureResult.mResultExtras = resultExtras;
+        captureResult.mMetadata = result->result;
 
-        if (captureResult.update(ANDROID_REQUEST_FRAME_COUNT,
-                        (int32_t*)&frameNumber, 1) != OK) {
+        if (captureResult.mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
+                (int32_t*)&frameNumber, 1) != OK) {
             SET_ERR("Failed to set frame# in metadata (%d)",
                     frameNumber);
             gotResult = false;
@@ -1712,15 +1836,15 @@
 
         // Append any previous partials to form a complete result
         if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
-            captureResult.append(collectedQuirkResult);
+            captureResult.mMetadata.append(collectedQuirkResult);
         }
 
-        captureResult.sort();
+        captureResult.mMetadata.sort();
 
         // Check that there's a timestamp in the result metadata
 
         camera_metadata_entry entry =
-                captureResult.find(ANDROID_SENSOR_TIMESTAMP);
+                captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
         if (entry.count == 0) {
             SET_ERR("No timestamp provided by HAL for frame %d!",
                     frameNumber);
@@ -1734,9 +1858,13 @@
 
         if (gotResult) {
             // Valid result, insert into queue
-            CameraMetadata& queuedResult =
-                *mResultQueue.insert(mResultQueue.end(), CameraMetadata());
-            queuedResult.swap(captureResult);
+            List<CaptureResult>::iterator queuedResult =
+                    mResultQueue.insert(mResultQueue.end(), CaptureResult(captureResult));
+            ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
+                   ", burstId = %" PRId32, __FUNCTION__,
+                   queuedResult->mResultExtras.requestId,
+                   queuedResult->mResultExtras.frameNumber,
+                   queuedResult->mResultExtras.burstId);
         }
     } // scope for mOutputLock
 
@@ -1762,8 +1890,6 @@
 
 }
 
-
-
 void Camera3Device::notify(const camera3_notify_msg *msg) {
     ATRACE_CALL();
     NotificationListener *listener;
@@ -1790,18 +1916,32 @@
                     mId, __FUNCTION__, msg->message.error.frame_number,
                     streamId, msg->message.error.error_code);
 
+            CaptureResultExtras resultExtras;
             // Set request error status for the request in the in-flight tracking
             {
                 Mutex::Autolock l(mInFlightLock);
                 ssize_t idx = mInFlightMap.indexOfKey(msg->message.error.frame_number);
                 if (idx >= 0) {
-                    mInFlightMap.editValueAt(idx).requestStatus = msg->message.error.error_code;
+                    InFlightRequest &r = mInFlightMap.editValueAt(idx);
+                    r.requestStatus = msg->message.error.error_code;
+                    resultExtras = r.resultExtras;
+                } else {
+                    resultExtras.frameNumber = msg->message.error.frame_number;
+                    ALOGE("Camera %d: %s: cannot find in-flight request on frame %" PRId64
+                          " error", mId, __FUNCTION__, resultExtras.frameNumber);
                 }
             }
 
             if (listener != NULL) {
-                listener->notifyError(msg->message.error.error_code,
-                        msg->message.error.frame_number, streamId);
+                if (msg->message.error.error_code == CAMERA3_MSG_ERROR_DEVICE) {
+                    listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+                                          resultExtras);
+                } else {
+                    listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE,
+                                          resultExtras);
+                }
+            } else {
+                ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
             }
             break;
         }
@@ -1812,16 +1952,17 @@
             // Verify ordering of shutter notifications
             {
                 Mutex::Autolock l(mOutputLock);
-                if (frameNumber != mNextShutterFrameNumber) {
+                // TODO: need to track errors for tighter bounds on expected frame number.
+                if (frameNumber < mNextShutterFrameNumber) {
                     SET_ERR("Shutter notification out-of-order. Expected "
                             "notification for frame %d, got frame %d",
                             mNextShutterFrameNumber, frameNumber);
                     break;
                 }
-                mNextShutterFrameNumber++;
+                mNextShutterFrameNumber = frameNumber + 1;
             }
 
-            int32_t requestId = -1;
+            CaptureResultExtras resultExtras;
 
             // Set timestamp for the request in the in-flight tracking
             // and get the request ID to send upstream
@@ -1831,7 +1972,7 @@
                 if (idx >= 0) {
                     InFlightRequest &r = mInFlightMap.editValueAt(idx);
                     r.captureTimestamp = timestamp;
-                    requestId = r.requestId;
+                    resultExtras = r.resultExtras;
                 }
             }
             if (idx < 0) {
@@ -1840,10 +1981,10 @@
                 break;
             }
             ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
-                    mId, __FUNCTION__, frameNumber, requestId, timestamp);
+                    mId, __FUNCTION__, frameNumber, resultExtras.requestId, timestamp);
             // Call listener, if any
             if (listener != NULL) {
-                listener->notifyShutter(requestId, timestamp);
+                listener->notifyShutter(resultExtras, timestamp);
             }
             break;
         }
@@ -1865,6 +2006,7 @@
     return retVal;
 }
 
+
 /**
  * RequestThread inner class methods
  */
@@ -1881,7 +2023,8 @@
         mDoPause(false),
         mPaused(true),
         mFrameNumber(0),
-        mLatestRequestId(NAME_NOT_FOUND) {
+        mLatestRequestId(NAME_NOT_FOUND),
+        mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES) {
     mStatusId = statusTracker->addComponent();
 }
 
@@ -1890,10 +2033,22 @@
     mReconfigured = true;
 }
 
-status_t Camera3Device::RequestThread::queueRequest(
-         sp<CaptureRequest> request) {
+status_t Camera3Device::RequestThread::queueRequestList(
+        List<sp<CaptureRequest> > &requests,
+        /*out*/
+        int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
-    mRequestQueue.push_back(request);
+    for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end();
+            ++it) {
+        mRequestQueue.push_back(*it);
+    }
+
+    if (lastFrameNumber != NULL) {
+        *lastFrameNumber = mFrameNumber + mRequestQueue.size() - 1;
+        ALOGV("%s: requestId %d, mFrameNumber %" PRId32 ", lastFrameNumber %" PRId64 ".",
+              __FUNCTION__, (*(requests.begin()))->mResultExtras.requestId, mFrameNumber,
+              *lastFrameNumber);
+    }
 
     unpauseForNewRequests();
 
@@ -1957,28 +2112,43 @@
 }
 
 status_t Camera3Device::RequestThread::setRepeatingRequests(
-        const RequestList &requests) {
+        const RequestList &requests,
+        /*out*/
+        int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
+    if (lastFrameNumber != NULL) {
+        *lastFrameNumber = mRepeatingLastFrameNumber;
+    }
     mRepeatingRequests.clear();
     mRepeatingRequests.insert(mRepeatingRequests.begin(),
             requests.begin(), requests.end());
 
     unpauseForNewRequests();
 
+    mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
 
-status_t Camera3Device::RequestThread::clearRepeatingRequests() {
+status_t Camera3Device::RequestThread::clearRepeatingRequests(/*out*/int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
     mRepeatingRequests.clear();
+    if (lastFrameNumber != NULL) {
+        *lastFrameNumber = mRepeatingLastFrameNumber;
+    }
+    mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
 
-status_t Camera3Device::RequestThread::clear() {
+status_t Camera3Device::RequestThread::clear(/*out*/int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
+    ALOGV("RequestThread::%s:", __FUNCTION__);
     mRepeatingRequests.clear();
     mRequestQueue.clear();
     mTriggerMap.clear();
+    if (lastFrameNumber != NULL) {
+        *lastFrameNumber = mRepeatingLastFrameNumber;
+    }
+    mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
 
@@ -2030,6 +2200,7 @@
 
     // Create request to HAL
     camera3_capture_request_t request = camera3_capture_request_t();
+    request.frame_number = nextRequest->mResultExtras.frameNumber;
     Vector<camera3_stream_buffer_t> outputBuffers;
 
     // Get the request ID, if any
@@ -2050,7 +2221,7 @@
     if (res < 0) {
         SET_ERR("RequestThread: Unable to insert triggers "
                 "(capture request %d, HAL device: %s (%d)",
-                (mFrameNumber+1), strerror(-res), res);
+                request.frame_number, strerror(-res), res);
         cleanUpFailedRequest(request, nextRequest, outputBuffers);
         return false;
     }
@@ -2068,7 +2239,7 @@
         if (res != OK) {
             SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
                     "(capture request %d, HAL device: %s (%d)",
-                    (mFrameNumber+1), strerror(-res), res);
+                    request.frame_number, strerror(-res), res);
             cleanUpFailedRequest(request, nextRequest, outputBuffers);
             return false;
         }
@@ -2092,7 +2263,7 @@
             if (e.count > 0) {
                 ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
                       __FUNCTION__,
-                      mFrameNumber+1,
+                      request.frame_number,
                       e.data.u8[0]);
             }
         }
@@ -2134,8 +2305,6 @@
         request.num_output_buffers++;
     }
 
-    request.frame_number = mFrameNumber++;
-
     // Log request in the in-flight queue
     sp<Camera3Device> parent = mParent.promote();
     if (parent == NULL) {
@@ -2144,8 +2313,13 @@
         return false;
     }
 
-    res = parent->registerInFlight(request.frame_number, requestId,
-            request.num_output_buffers);
+    res = parent->registerInFlight(request.frame_number,
+            request.num_output_buffers, nextRequest->mResultExtras);
+    ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
+           ", burstId = %" PRId32 ".",
+            __FUNCTION__,
+            nextRequest->mResultExtras.requestId, nextRequest->mResultExtras.frameNumber,
+            nextRequest->mResultExtras.burstId);
     if (res != OK) {
         SET_ERR("RequestThread: Unable to register new in-flight request:"
                 " %s (%d)", strerror(-res), res);
@@ -2222,6 +2396,7 @@
     return mLatestRequest;
 }
 
+
 void Camera3Device::RequestThread::cleanUpFailedRequest(
         camera3_capture_request_t &request,
         sp<CaptureRequest> &nextRequest,
@@ -2263,6 +2438,9 @@
                     ++firstRequest,
                     requests.end());
             // No need to wait any longer
+
+            mRepeatingLastFrameNumber = mFrameNumber + requests.size() - 1;
+
             break;
         }
 
@@ -2314,6 +2492,9 @@
         mReconfigured = false;
     }
 
+    if (nextRequest != NULL) {
+        nextRequest->mResultExtras.frameNumber = mFrameNumber++;
+    }
     return nextRequest;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 468f641..00ae771 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -24,6 +24,8 @@
 #include <utils/Thread.h>
 #include <utils/KeyedVector.h>
 #include <hardware/camera3.h>
+#include <camera/CaptureResult.h>
+#include <camera/camera2/ICameraDeviceUser.h>
 
 #include "common/CameraDeviceBase.h"
 #include "device3/StatusTracker.h"
@@ -54,7 +56,7 @@
 }
 
 /**
- * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0
+ * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher.
  */
 class Camera3Device :
             public CameraDeviceBase,
@@ -78,15 +80,21 @@
 
     // Capture and setStreamingRequest will configure streams if currently in
     // idle state
-    virtual status_t capture(CameraMetadata &request);
-    virtual status_t setStreamingRequest(const CameraMetadata &request);
-    virtual status_t clearStreamingRequest();
+    virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL);
+    virtual status_t captureList(const List<const CameraMetadata> &requests,
+                                 int64_t *lastFrameNumber = NULL);
+    virtual status_t setStreamingRequest(const CameraMetadata &request,
+                                         int64_t *lastFrameNumber = NULL);
+    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                             int64_t *lastFrameNumber = NULL);
+    virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
 
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
 
     // Actual stream creation/deletion is delayed until first request is submitted
     // If adding streams while actively capturing, will pause device before adding
-    // stream, reconfiguring device, and unpausing.
+    // stream, reconfiguring device, and unpausing. Note that, for JPEG stream, the
+    // buffer size may be overwritten by an more accurate value calculated by Camera3Device.
     virtual status_t createStream(sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format, size_t size,
             int *id);
@@ -116,7 +124,7 @@
     virtual status_t setNotifyCallback(NotificationListener *listener);
     virtual bool     willNotify3A();
     virtual status_t waitForNextFrame(nsecs_t timeout);
-    virtual status_t getNextFrame(CameraMetadata *frame);
+    virtual status_t getNextResult(CaptureResult *frame);
 
     virtual status_t triggerAutofocus(uint32_t id);
     virtual status_t triggerCancelAutofocus(uint32_t id);
@@ -125,7 +133,7 @@
     virtual status_t pushReprocessBuffer(int reprocessStreamId,
             buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
 
-    virtual status_t flush();
+    virtual status_t flush(int64_t *lastFrameNumber = NULL);
 
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
@@ -137,6 +145,8 @@
     static const nsecs_t       kShutdownTimeout   = 5000000000; // 5 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
     struct                     RequestTrigger;
+    // minimal jpeg buffer size: 256KB + blob header
+    static const ssize_t       kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
 
     // A lock to enforce serialization on the input/configure side
     // of the public interface.
@@ -157,7 +167,6 @@
     camera3_device_t          *mHal3Device;
 
     CameraMetadata             mDeviceInfo;
-    vendor_tag_query_ops_t     mVendorTagOps;
 
     enum Status {
         STATUS_ERROR,
@@ -199,9 +208,20 @@
         sp<camera3::Camera3Stream>          mInputStream;
         Vector<sp<camera3::Camera3OutputStreamInterface> >
                                             mOutputStreams;
+        CaptureResultExtras                 mResultExtras;
     };
     typedef List<sp<CaptureRequest> > RequestList;
 
+    status_t checkStatusOkToCaptureLocked();
+
+    status_t convertMetadataListToRequestListLocked(
+            const List<const CameraMetadata> &metadataList,
+            /*out*/
+            RequestList *requestList);
+
+    status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+                                  int64_t *lastFrameNumber = NULL);
+
     /**
      * Get the last request submitted to the hal by the request thread.
      *
@@ -237,6 +257,13 @@
     status_t waitUntilStateThenRelock(bool active, nsecs_t timeout);
 
     /**
+     * Implementation of waitUntilDrained. On success, will transition to IDLE state.
+     *
+     * Need to be called with mLock and mInterfaceLock held.
+     */
+    status_t waitUntilDrainedLocked();
+
+    /**
      * Do common work for setting up a streaming or single capture request.
      * On success, will transition to ACTIVE if in IDLE.
      */
@@ -270,6 +297,12 @@
      */
     bool               tryLockSpinRightRound(Mutex& lock);
 
+    /**
+     * Get Jpeg buffer size for a given jpeg resolution.
+     * Negative values are error codes.
+     */
+    ssize_t             getJpegBufferSize(uint32_t width, uint32_t height) const;
+
     struct RequestTrigger {
         // Metadata tag number, e.g. android.control.aePrecaptureTrigger
         uint32_t metadataTag;
@@ -308,15 +341,21 @@
          * on either. Use waitUntilPaused to wait until request queue
          * has emptied out.
          */
-        status_t setRepeatingRequests(const RequestList& requests);
-        status_t clearRepeatingRequests();
+        status_t setRepeatingRequests(const RequestList& requests,
+                                      /*out*/
+                                      int64_t *lastFrameNumber = NULL);
+        status_t clearRepeatingRequests(/*out*/
+                                        int64_t *lastFrameNumber = NULL);
 
-        status_t queueRequest(sp<CaptureRequest> request);
+        status_t queueRequestList(List<sp<CaptureRequest> > &requests,
+                                  /*out*/
+                                  int64_t *lastFrameNumber = NULL);
 
         /**
          * Remove all queued and repeating requests, and pending triggers
          */
-        status_t clear();
+        status_t clear(/*out*/
+                       int64_t *lastFrameNumber = NULL);
 
         /**
          * Queue a trigger to be dispatched with the next outgoing
@@ -429,6 +468,8 @@
         TriggerMap         mTriggerMap;
         TriggerMap         mTriggerRemovedMap;
         TriggerMap         mTriggerReplacedMap;
+
+        int64_t            mRepeatingLastFrameNumber;
     };
     sp<RequestThread> mRequestThread;
 
@@ -437,8 +478,6 @@
      */
 
     struct InFlightRequest {
-        // android.request.id for the request
-        int     requestId;
         // Set by notify() SHUTTER call.
         nsecs_t captureTimestamp;
         int     requestStatus;
@@ -447,6 +486,7 @@
         // Decremented by calls to process_capture_result with valid output
         // buffers
         int     numBuffersLeft;
+        CaptureResultExtras resultExtras;
 
         // Fields used by the partial result quirk only
         struct PartialResultQuirkInFlight {
@@ -462,20 +502,26 @@
 
         // Default constructor needed by KeyedVector
         InFlightRequest() :
-                requestId(0),
                 captureTimestamp(0),
                 requestStatus(OK),
                 haveResultMetadata(false),
                 numBuffersLeft(0) {
         }
 
-        InFlightRequest(int id, int numBuffers) :
-                requestId(id),
+        InFlightRequest(int numBuffers) :
                 captureTimestamp(0),
                 requestStatus(OK),
                 haveResultMetadata(false),
                 numBuffersLeft(numBuffers) {
         }
+
+        InFlightRequest(int numBuffers, CaptureResultExtras extras) :
+                captureTimestamp(0),
+                requestStatus(OK),
+                haveResultMetadata(false),
+                numBuffersLeft(numBuffers),
+                resultExtras(extras) {
+        }
     };
     // Map from frame number to the in-flight request state
     typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap;
@@ -483,25 +529,25 @@
     Mutex                  mInFlightLock; // Protects mInFlightMap
     InFlightMap            mInFlightMap;
 
-    status_t registerInFlight(int32_t frameNumber, int32_t requestId,
-            int32_t numBuffers);
+    status_t registerInFlight(uint32_t frameNumber,
+            int32_t numBuffers, CaptureResultExtras resultExtras);
 
     /**
      * For the partial result quirk, check if all 3A state fields are available
      * and if so, queue up 3A-only result to the client. Returns true if 3A
      * is sent.
      */
-    bool processPartial3AQuirk(int32_t frameNumber, int32_t requestId,
-            const CameraMetadata& partial);
+    bool processPartial3AQuirk(uint32_t frameNumber,
+            const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
 
     // Helpers for reading and writing 3A metadata into to/from partial results
     template<typename T>
     bool get3AResult(const CameraMetadata& result, int32_t tag,
-            T* value, int32_t frameNumber);
+            T* value, uint32_t frameNumber);
 
     template<typename T>
     bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
-            int32_t frameNumber);
+            uint32_t frameNumber);
     /**
      * Tracking for idle detection
      */
@@ -518,7 +564,7 @@
 
     uint32_t               mNextResultFrameNumber;
     uint32_t               mNextShutterFrameNumber;
-    List<CameraMetadata>   mResultQueue;
+    List<CaptureResult>   mResultQueue;
     Condition              mResultSignal;
     NotificationListener  *mListener;
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index d662cc2..50a2c10 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -34,7 +34,8 @@
         Camera3Stream(id, type,
                 width, height, maxSize, format),
         mTotalBufferCount(0),
-        mDequeuedBufferCount(0),
+        mHandoutTotalBufferCount(0),
+        mHandoutOutputBufferCount(0),
         mFrameCount(0),
         mLastTimestamp(0) {
 
@@ -55,8 +56,8 @@
     nsecs_t signalTime = mCombinedFence->getSignalTime();
     ALOGV("%s: Stream %d: Has %zu outstanding buffers,"
             " buffer signal time is %" PRId64,
-            __FUNCTION__, mId, mDequeuedBufferCount, signalTime);
-    if (mDequeuedBufferCount > 0 || signalTime == INT64_MAX) {
+            __FUNCTION__, mId, mHandoutTotalBufferCount, signalTime);
+    if (mHandoutTotalBufferCount > 0 || signalTime == INT64_MAX) {
         return true;
     }
     return false;
@@ -75,7 +76,7 @@
     lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
             mFrameCount, mLastTimestamp);
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
-            mTotalBufferCount, mDequeuedBufferCount);
+            mTotalBufferCount, mHandoutTotalBufferCount);
     write(fd, lines.string(), lines.size());
 }
 
@@ -104,6 +105,14 @@
     return mTotalBufferCount;
 }
 
+size_t Camera3IOStreamBase::getHandoutOutputBufferCountLocked() {
+    return mHandoutOutputBufferCount;
+}
+
+size_t Camera3IOStreamBase::getHandoutInputBufferCountLocked() {
+    return (mHandoutTotalBufferCount - mHandoutOutputBufferCount);
+}
+
 status_t Camera3IOStreamBase::disconnectLocked() {
     switch (mState) {
         case STATE_IN_RECONFIG:
@@ -117,9 +126,9 @@
             return -ENOTCONN;
     }
 
-    if (mDequeuedBufferCount > 0) {
+    if (mHandoutTotalBufferCount > 0) {
         ALOGE("%s: Can't disconnect with %zu buffers still dequeued!",
-                __FUNCTION__, mDequeuedBufferCount);
+                __FUNCTION__, mHandoutTotalBufferCount);
         return INVALID_OPERATION;
     }
 
@@ -130,7 +139,8 @@
                                               buffer_handle_t *handle,
                                               int acquireFence,
                                               int releaseFence,
-                                              camera3_buffer_status_t status) {
+                                              camera3_buffer_status_t status,
+                                              bool output) {
     /**
      * Note that all fences are now owned by HAL.
      */
@@ -144,14 +154,25 @@
     buffer.status = status;
 
     // Inform tracker about becoming busy
-    if (mDequeuedBufferCount == 0 && mState != STATE_IN_CONFIG &&
+    if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG &&
             mState != STATE_IN_RECONFIG) {
+        /**
+         * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers
+         * before/after register_stream_buffers during initial configuration
+         * or re-configuration.
+         *
+         * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2
+         */
         sp<StatusTracker> statusTracker = mStatusTracker.promote();
         if (statusTracker != 0) {
             statusTracker->markComponentActive(mStatusId);
         }
     }
-    mDequeuedBufferCount++;
+    mHandoutTotalBufferCount++;
+
+    if (output) {
+        mHandoutOutputBufferCount++;
+    }
 }
 
 status_t Camera3IOStreamBase::getBufferPreconditionCheckLocked() const {
@@ -165,7 +186,7 @@
 
     // Only limit dequeue amount when fully configured
     if (mState == STATE_CONFIGURED &&
-            mDequeuedBufferCount == camera3_stream::max_buffers) {
+            mHandoutTotalBufferCount == camera3_stream::max_buffers) {
         ALOGE("%s: Stream %d: Already dequeued maximum number of simultaneous"
                 " buffers (%d)", __FUNCTION__, mId,
                 camera3_stream::max_buffers);
@@ -183,7 +204,7 @@
                 __FUNCTION__, mId, mState);
         return INVALID_OPERATION;
     }
-    if (mDequeuedBufferCount == 0) {
+    if (mHandoutTotalBufferCount == 0) {
         ALOGE("%s: Stream %d: No buffers outstanding to return", __FUNCTION__,
                 mId);
         return INVALID_OPERATION;
@@ -221,9 +242,20 @@
         mCombinedFence = Fence::merge(mName, mCombinedFence, releaseFence);
     }
 
-    mDequeuedBufferCount--;
-    if (mDequeuedBufferCount == 0 && mState != STATE_IN_CONFIG &&
+    if (output) {
+        mHandoutOutputBufferCount--;
+    }
+
+    mHandoutTotalBufferCount--;
+    if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG &&
             mState != STATE_IN_RECONFIG) {
+        /**
+         * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers
+         * before/after register_stream_buffers during initial configuration
+         * or re-configuration.
+         *
+         * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2
+         */
         ALOGV("%s: Stream %d: All buffers returned; now idle", __FUNCTION__,
                 mId);
         sp<StatusTracker> statusTracker = mStatusTracker.promote();
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index fcb9d04..a35c290 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -48,7 +48,10 @@
   protected:
     size_t            mTotalBufferCount;
     // sum of input and output buffers that are currently acquired by HAL
-    size_t            mDequeuedBufferCount;
+    size_t            mHandoutTotalBufferCount;
+    // number of output buffers that are currently acquired by HAL. This will be
+    // Redundant when camera3 streams are no longer bidirectional streams.
+    size_t            mHandoutOutputBufferCount;
     Condition         mBufferReturnedSignal;
     uint32_t          mFrameCount;
     // Last received output buffer's timestamp
@@ -76,6 +79,10 @@
 
     virtual size_t   getBufferCountLocked();
 
+    virtual size_t   getHandoutOutputBufferCountLocked();
+
+    virtual size_t   getHandoutInputBufferCountLocked();
+
     virtual status_t getEndpointUsage(uint32_t *usage) = 0;
 
     status_t getBufferPreconditionCheckLocked() const;
@@ -92,7 +99,8 @@
                              buffer_handle_t *handle,
                              int acquire_fence,
                              int release_fence,
-                             camera3_buffer_status_t status);
+                             camera3_buffer_status_t status,
+                             bool output);
 
 }; // class Camera3IOStreamBase
 
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 5aa9a3e..319be1d 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -81,7 +81,7 @@
      * in which case we reassign it to acquire_fence
      */
     handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
-                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);
+                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/false);
     mBuffersInFlight.push_back(bufferItem);
 
     return OK;
@@ -199,14 +199,36 @@
     assert(mMaxSize == 0);
     assert(camera3_stream::format != HAL_PIXEL_FORMAT_BLOB);
 
-    mTotalBufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS +
-                        camera3_stream::max_buffers;
-    mDequeuedBufferCount = 0;
+    mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
 
     if (mConsumer.get() == 0) {
-        sp<BufferQueue> bq = new BufferQueue();
-        mConsumer = new BufferItemConsumer(bq, camera3_stream::usage,
+        sp<IGraphicBufferProducer> producer;
+        sp<IGraphicBufferConsumer> consumer;
+        BufferQueue::createBufferQueue(&producer, &consumer);
+
+        int minUndequeuedBuffers = 0;
+        res = producer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
+        if (res != OK || minUndequeuedBuffers < 0) {
+            ALOGE("%s: Stream %d: Could not query min undequeued buffers (error %d, bufCount %d)",
+                  __FUNCTION__, mId, res, minUndequeuedBuffers);
+            return res;
+        }
+        size_t minBufs = static_cast<size_t>(minUndequeuedBuffers);
+        /*
+         * We promise never to 'acquire' more than camera3_stream::max_buffers
+         * at any one time.
+         *
+         * Boost the number up to meet the minimum required buffer count.
+         *
+         * (Note that this sets consumer-side buffer count only,
+         * and not the sum of producer+consumer side as in other camera streams).
+         */
+        mTotalBufferCount = camera3_stream::max_buffers > minBufs ?
+            camera3_stream::max_buffers : minBufs;
+        // TODO: somehow set the total buffer count when producer connects?
+
+        mConsumer = new BufferItemConsumer(consumer, camera3_stream::usage,
                                            mTotalBufferCount);
         mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
     }
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 681d684..ae49467 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -44,6 +44,8 @@
 
     virtual void     dump(int fd, const Vector<String16> &args) const;
 
+    // TODO: expose an interface to get the IGraphicBufferProducer
+
   private:
 
     typedef BufferItemConsumer::BufferItem BufferItem;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 682755d..7ec649b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -119,7 +119,7 @@
      * in which case we reassign it to acquire_fence
      */
     handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
-                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);
+                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
 
     return OK;
 }
@@ -324,7 +324,7 @@
     }
 
     mTotalBufferCount = maxConsumerBuffers + camera3_stream::max_buffers;
-    mDequeuedBufferCount = 0;
+    mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
     mLastTimestamp = 0;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 70406f1..abfb602 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -23,6 +23,8 @@
 #include "device3/Camera3Stream.h"
 #include "device3/StatusTracker.h"
 
+#include <cutils/properties.h>
+
 namespace android {
 
 namespace camera3 {
@@ -137,6 +139,7 @@
     if (mState == STATE_CONSTRUCTED) {
         mState = STATE_IN_CONFIG;
     } else { // mState == STATE_CONFIGURED
+        LOG_ALWAYS_FATAL_IF(mState != STATE_CONFIGURED, "Invalid state: 0x%x", mState);
         mState = STATE_IN_RECONFIG;
     }
 
@@ -209,8 +212,30 @@
 status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
+    status_t res = OK;
 
-    status_t res = getBufferLocked(buffer);
+    // This function should be only called when the stream is configured already.
+    if (mState != STATE_CONFIGURED) {
+        ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
+                __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // Wait for new buffer returned back if we are running into the limit.
+    if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
+        ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
+                __FUNCTION__, camera3_stream::max_buffers);
+        res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
+        if (res != OK) {
+            if (res == TIMED_OUT) {
+                ALOGE("%s: wait for output buffer return timed out after %lldms", __FUNCTION__,
+                        kWaitForBufferDuration / 1000000LL);
+            }
+            return res;
+        }
+    }
+
+    res = getBufferLocked(buffer);
     if (res == OK) {
         fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
     }
@@ -223,9 +248,18 @@
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
 
+    /**
+     * TODO: Check that the state is valid first.
+     *
+     * <HAL3.2 IN_CONFIG and IN_RECONFIG in addition to CONFIGURED.
+     * >= HAL3.2 CONFIGURED only
+     *
+     * Do this for getBuffer as well.
+     */
     status_t res = returnBufferLocked(buffer, timestamp);
     if (res == OK) {
         fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true);
+        mOutputBufferReturnedSignal.signal();
     }
 
     return res;
@@ -234,8 +268,30 @@
 status_t Camera3Stream::getInputBuffer(camera3_stream_buffer *buffer) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
+    status_t res = OK;
 
-    status_t res = getInputBufferLocked(buffer);
+    // This function should be only called when the stream is configured already.
+    if (mState != STATE_CONFIGURED) {
+        ALOGE("%s: Stream %d: Can't get input buffers if stream is not in CONFIGURED state %d",
+                __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // Wait for new buffer returned back if we are running into the limit.
+    if (getHandoutInputBufferCountLocked() == camera3_stream::max_buffers) {
+        ALOGV("%s: Already dequeued max input buffers (%d), wait for next returned one.",
+                __FUNCTION__, camera3_stream::max_buffers);
+        res = mInputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
+        if (res != OK) {
+            if (res == TIMED_OUT) {
+                ALOGE("%s: wait for input buffer return timed out after %lldms", __FUNCTION__,
+                        kWaitForBufferDuration / 1000000LL);
+            }
+            return res;
+        }
+    }
+
+    res = getInputBufferLocked(buffer);
     if (res == OK) {
         fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false);
     }
@@ -250,6 +306,7 @@
     status_t res = returnInputBufferLocked(buffer);
     if (res == OK) {
         fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/false);
+        mInputBufferReturnedSignal.signal();
     }
     return res;
 }
@@ -314,12 +371,46 @@
 
 status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) {
     ATRACE_CALL();
+
+    /**
+     * >= CAMERA_DEVICE_API_VERSION_3_2:
+     *
+     * camera3_device_t->ops->register_stream_buffers() is not called and must
+     * be NULL.
+     */
+    if (hal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_2) {
+        ALOGV("%s: register_stream_buffers unused as of HAL3.2", __FUNCTION__);
+
+        /**
+         * Skip the NULL check if camera.dev.register_stream is 1.
+         *
+         * For development-validation purposes only.
+         *
+         * TODO: Remove the property check before shipping L (b/13914251).
+         */
+        char value[PROPERTY_VALUE_MAX] = { '\0', };
+        property_get("camera.dev.register_stream", value, "0");
+        int propInt = atoi(value);
+
+        if (propInt == 0 && hal3Device->ops->register_stream_buffers != NULL) {
+            ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; "
+                    "must be set to NULL in camera3_device::ops", __FUNCTION__);
+            return INVALID_OPERATION;
+        } else {
+            ALOGD("%s: Skipping NULL check for deprecated register_stream_buffers", __FUNCTION__);
+        }
+
+        return OK;
+    } else {
+        ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__);
+    }
+
     status_t res;
 
     size_t bufferCount = getBufferCountLocked();
 
     Vector<buffer_handle_t*> buffers;
-    buffers.insertAt(NULL, 0, bufferCount);
+    buffers.insertAt(/*prototype_item*/NULL, /*index*/0, bufferCount);
 
     camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set();
     bufferSet.stream = this;
@@ -327,7 +418,7 @@
     bufferSet.buffers = buffers.editArray();
 
     Vector<camera3_stream_buffer_t> streamBuffers;
-    streamBuffers.insertAt(camera3_stream_buffer_t(), 0, bufferCount);
+    streamBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount);
 
     // Register all buffers with the HAL. This means getting all the buffers
     // from the stream, providing them to the HAL with the
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 6eeb721..14f5387 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -82,6 +82,23 @@
  *    STATE_CONFIGURED     => STATE_CONSTRUCTED:
  *        When disconnect() is called after making sure stream is idle with
  *        waitUntilIdle().
+ *
+ * Status Tracking:
+ *    Each stream is tracked by StatusTracker as a separate component,
+ *    depending on the handed out buffer count. The state must be STATE_CONFIGURED
+ *    in order for the component to be marked.
+ *
+ *    It's marked in one of two ways:
+ *
+ *    - ACTIVE: One or more buffers have been handed out (with #getBuffer).
+ *    - IDLE: All buffers have been returned (with #returnBuffer), and their
+ *          respective release_fence(s) have been signaled.
+ *
+ *    A typical use case is output streams. When the HAL has any buffers
+ *    dequeued, the stream is marked ACTIVE. When the HAL returns all buffers
+ *    (e.g. if no capture requests are active), the stream is marked IDLE.
+ *    In this use case, the app consumer does not affect the component status.
+ *
  */
 class Camera3Stream :
         protected camera3_stream,
@@ -262,6 +279,12 @@
     // Get the total number of buffers in the queue
     virtual size_t   getBufferCountLocked() = 0;
 
+    // Get handout output buffer count.
+    virtual size_t   getHandoutOutputBufferCountLocked() = 0;
+
+    // Get handout input buffer count.
+    virtual size_t   getHandoutInputBufferCountLocked() = 0;
+
     // Get the usage flags for the other endpoint, or return
     // INVALID_OPERATION if they cannot be obtained.
     virtual status_t getEndpointUsage(uint32_t *usage) = 0;
@@ -274,6 +297,9 @@
   private:
     uint32_t oldUsage;
     uint32_t oldMaxBuffers;
+    Condition mOutputBufferReturnedSignal;
+    Condition mInputBufferReturnedSignal;
+    static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
 
     // Gets all buffers from endpoint and registers them with the HAL.
     status_t registerBuffersLocked(camera3_device *hal3Device);
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 44d8188..05b3d1f 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -111,15 +111,17 @@
 } // namespace anonymous
 
 Camera3ZslStream::Camera3ZslStream(int id, uint32_t width, uint32_t height,
-        int depth) :
+        int bufferCount) :
         Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
                             width, height,
                             HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED),
-        mDepth(depth) {
+        mDepth(bufferCount) {
 
-    sp<BufferQueue> bq = new BufferQueue();
-    mProducer = new RingBufferConsumer(bq, GRALLOC_USAGE_HW_CAMERA_ZSL, depth);
-    mConsumer = new Surface(bq);
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    mProducer = new RingBufferConsumer(consumer, GRALLOC_USAGE_HW_CAMERA_ZSL, bufferCount);
+    mConsumer = new Surface(producer);
 }
 
 Camera3ZslStream::~Camera3ZslStream() {
@@ -174,7 +176,7 @@
      * in which case we reassign it to acquire_fence
      */
     handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
-                         /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);
+                         /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/false);
 
     mBuffersInFlight.push_back(bufferItem);
 
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
index c7f4490..6721832 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -37,10 +37,10 @@
         public Camera3OutputStream {
   public:
     /**
-     * Set up a ZSL stream of a given resolution. Depth is the number of buffers
+     * Set up a ZSL stream of a given resolution. bufferCount is the number of buffers
      * cached within the stream that can be retrieved for input.
      */
-    Camera3ZslStream(int id, uint32_t width, uint32_t height, int depth);
+    Camera3ZslStream(int id, uint32_t width, uint32_t height, int bufferCount);
     ~Camera3ZslStream();
 
     virtual void     dump(int fd, const Vector<String16> &args) const;
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index b4ad824..a03736d 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -64,7 +64,7 @@
     // bufferCount parameter specifies how many buffers can be pinned for user
     // access at the same time.
     RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer, uint32_t consumerUsage,
-            int bufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS);
+            int bufferCount);
 
     virtual ~RingBufferConsumer();
 
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 683fdf3..0c7fbbd 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -54,7 +54,7 @@
     }
 }
 
-status_t MediaLogService::dump(int fd, const Vector<String16>& args)
+status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
 {
     // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
     static const String16 sDump("android.permission.DUMP");
diff --git a/tools/resampler_tools/fir.cpp b/tools/resampler_tools/fir.cpp
index 8c8a4ea..62eddca 100644
--- a/tools/resampler_tools/fir.cpp
+++ b/tools/resampler_tools/fir.cpp
@@ -20,15 +20,25 @@
 #include <stdlib.h>
 #include <string.h>
 
-static double sinc(double x) {
+static inline double sinc(double x) {
     if (fabs(x) == 0.0f) return 1.0f;
     return sin(x) / x;
 }
 
-static double sqr(double x) {
+static inline double sqr(double x) {
     return x*x;
 }
 
+static inline int64_t toint(double x, int64_t maxval) {
+    int64_t v;
+
+    v = static_cast<int64_t>(floor(x * maxval + 0.5));
+    if (v >= maxval) {
+        return maxval - 1; // error!
+    }
+    return v;
+}
+
 static double I0(double x) {
     // from the Numerical Recipes in C p. 237
     double ax,ans,y;
@@ -54,11 +64,12 @@
     return I0(beta * sqrt(1.0 - sqr((2.0*k)/N - 1.0))) / I0(beta);
 }
 
-
 static void usage(char* name) {
     fprintf(stderr,
-            "usage: %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings] [-f {float|fixed}] [-b beta] [-v dBFS] [-l lerp]\n"
-            "       %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings] [-f {float|fixed}] [-b beta] [-v dBFS] -p M/N\n"
+            "usage: %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]"
+            " [-f {float|fixed|fixed16}] [-b beta] [-v dBFS] [-l lerp]\n"
+            "       %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]"
+            " [-f {float|fixed|fixed16}] [-b beta] [-v dBFS] -p M/N\n"
             "    -h    this help message\n"
             "    -d    debug, print comma-separated coefficient table\n"
             "    -p    generate poly-phase filter coefficients, with sample increment M/N\n"
@@ -66,6 +77,7 @@
             "    -c    cut-off frequency (20478)\n"
             "    -n    number of zero-crossings on one side (8)\n"
             "    -l    number of lerping bits (4)\n"
+            "    -m    number of polyphases (related to -l, default 16)\n"
             "    -f    output format, can be fixed-point or floating-point (fixed)\n"
             "    -b    kaiser window parameter beta (7.865 [-80dB])\n"
             "    -v    attenuation in dBFS (0)\n",
@@ -77,8 +89,7 @@
 int main(int argc, char** argv)
 {
     // nc is the number of bits to store the coefficients
-    const int nc = 32;
-
+    int nc = 32;
     bool polyphase = false;
     unsigned int polyM = 160;
     unsigned int polyN = 147;
@@ -88,7 +99,6 @@
     double atten = 1;
     int format = 0;
 
-
     // in order to keep the errors associated with the linear
     // interpolation of the coefficients below the quantization error
     // we must satisfy:
@@ -104,7 +114,6 @@
     // Smith, J.O. Digital Audio Resampling Home Page
     // https://ccrma.stanford.edu/~jos/resample/, 2011-03-29
     //
-    int nz = 4;
 
     //         | 0.1102*(A - 8.7)                         A > 50
     //  beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21)   21 <= A <= 50
@@ -123,33 +132,33 @@
     //   100 dB   10.056
     double beta = 7.865;
 
-
     // 2*nzc = (A - 8) / (2.285 * dw)
     //      with dw the transition width = 2*pi*dF/Fs
     //
     int nzc = 8;
 
-    //
-    // Example:
-    // 44.1 KHz to 48 KHz resampling
-    // 100 dB rejection above 28 KHz
-    //   (the spectrum will fold around 24 KHz and we want 100 dB rejection
-    //    at the point where the folding reaches 20 KHz)
-    //  ...___|_____
-    //        |     \|
-    //        | ____/|\____
-    //        |/alias|     \
-    //  ------/------+------\---------> KHz
-    //       20     24     28
+    /*
+     * Example:
+     * 44.1 KHz to 48 KHz resampling
+     * 100 dB rejection above 28 KHz
+     *   (the spectrum will fold around 24 KHz and we want 100 dB rejection
+     *    at the point where the folding reaches 20 KHz)
+     *  ...___|_____
+     *        |     \|
+     *        | ____/|\____
+     *        |/alias|     \
+     *  ------/------+------\---------> KHz
+     *       20     24     28
+     *
+     * Transition band 8 KHz, or dw = 1.0472
+     *
+     * beta = 10.056
+     * nzc  = 20
+     */
 
-    // Transition band 8 KHz, or dw = 1.0472
-    //
-    // beta = 10.056
-    // nzc  = 20
-    //
-
+    int M = 1 << 4; // number of phases for interpolation
     int ch;
-    while ((ch = getopt(argc, argv, ":hds:c:n:f:l:b:p:v:")) != -1) {
+    while ((ch = getopt(argc, argv, ":hds:c:n:f:l:m:b:p:v:")) != -1) {
         switch (ch) {
             case 'd':
                 debug = true;
@@ -169,13 +178,26 @@
             case 'n':
                 nzc = atoi(optarg);
                 break;
+            case 'm':
+                M = atoi(optarg);
+                break;
             case 'l':
-                nz = atoi(optarg);
+                M = 1 << atoi(optarg);
                 break;
             case 'f':
-                if (!strcmp(optarg,"fixed")) format = 0;
-                else if (!strcmp(optarg,"float")) format = 1;
-                else usage(argv[0]);
+                if (!strcmp(optarg, "fixed")) {
+                    format = 0;
+                }
+                else if (!strcmp(optarg, "fixed16")) {
+                    format = 0;
+                    nc = 16;
+                }
+                else if (!strcmp(optarg, "float")) {
+                    format = 1;
+                }
+                else {
+                    usage(argv[0]);
+                }
                 break;
             case 'b':
                 beta = atof(optarg);
@@ -193,11 +215,14 @@
     // cut off frequency ratio Fc/Fs
     double Fcr = Fc / Fs;
 
-
     // total number of coefficients (one side)
-    const int M = (1 << nz);
+
     const int N = M * nzc;
 
+    // lerp (which is most useful if M is a power of 2)
+
+    int nz = 0; // recalculate nz as the bits needed to represent M
+    for (int i = M-1 ; i; i>>=1, nz++);
     // generate the right half of the filter
     if (!debug) {
         printf("// cmd-line: ");
@@ -207,7 +232,7 @@
         printf("\n");
         if (!polyphase) {
             printf("const int32_t RESAMPLE_FIR_SIZE           = %d;\n", N);
-            printf("const int32_t RESAMPLE_FIR_LERP_INT_BITS  = %d;\n", nz);
+            printf("const int32_t RESAMPLE_FIR_INT_PHASES     = %d;\n", M);
             printf("const int32_t RESAMPLE_FIR_NUM_COEF       = %d;\n", nzc);
         } else {
             printf("const int32_t RESAMPLE_FIR_SIZE           = %d;\n", 2*nzc*polyN);
@@ -224,7 +249,7 @@
         for (int i=0 ; i<=M ; i++) { // an extra set of coefs for interpolation
             for (int j=0 ; j<nzc ; j++) {
                 int ix = j*M + i;
-                double x = (2.0 * M_PI * ix * Fcr) / (1 << nz);
+                double x = (2.0 * M_PI * ix * Fcr) / M;
                 double y = kaiser(ix+N, 2*N, beta) * sinc(x) * 2.0 * Fcr;
                 y *= atten;
 
@@ -232,11 +257,13 @@
                     if (j == 0)
                         printf("\n    ");
                 }
-
                 if (!format) {
-                    int64_t yi = floor(y * ((1ULL<<(nc-1))) + 0.5);
-                    if (yi >= (1LL<<(nc-1))) yi = (1LL<<(nc-1))-1;
-                    printf("0x%08x, ", int32_t(yi));
+                    int64_t yi = toint(y, 1ULL<<(nc-1));
+                    if (nc > 16) {
+                        printf("0x%08x, ", int32_t(yi));
+                    } else {
+                        printf("0x%04x, ", int32_t(yi)&0xffff);
+                    }
                 } else {
                     printf("%.9g%s ", y, debug ? "," : "f,");
                 }
@@ -254,9 +281,12 @@
                 double y = kaiser(i+N, 2*N, beta) * sinc(x) * 2.0 * Fcr;;
                 y *= atten;
                 if (!format) {
-                    int64_t yi = floor(y * ((1ULL<<(nc-1))) + 0.5);
-                    if (yi >= (1LL<<(nc-1))) yi = (1LL<<(nc-1))-1;
-                    printf("0x%08x", int32_t(yi));
+                    int64_t yi = toint(y, 1ULL<<(nc-1));
+                    if (nc > 16) {
+                        printf("0x%08x", int32_t(yi));
+                    } else {
+                        printf("0x%04x", int32_t(yi)&0xffff);
+                    }
                 } else {
                     printf("%.9g%s", y, debug ? "" : "f");
                 }
@@ -277,5 +307,3 @@
 }
 
 // http://www.csee.umbc.edu/help/sound/AFsp-V2R1/html/audio/ResampAudio.html
-
-